2024-11-20 20:35:27,222 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-20 20:35:27,245 main DEBUG Took 0.020493 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-20 20:35:27,246 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-20 20:35:27,246 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-20 20:35:27,248 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-20 20:35:27,249 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,259 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-20 20:35:27,279 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,281 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,283 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,283 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,284 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,285 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,287 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,287 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,288 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,289 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,291 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,291 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,292 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,293 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,294 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,294 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,295 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,296 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,297 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,298 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,299 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,300 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 20:35:27,301 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,301 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-20 20:35:27,303 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 20:35:27,305 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-20 20:35:27,308 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-20 20:35:27,309 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-20 20:35:27,311 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-20 20:35:27,312 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-20 20:35:27,325 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-20 20:35:27,333 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-20 20:35:27,339 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-20 20:35:27,340 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-20 20:35:27,341 main DEBUG createAppenders(={Console})
2024-11-20 20:35:27,342 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-20 20:35:27,343 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-20 20:35:27,344 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-20 20:35:27,345 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-20 20:35:27,345 main DEBUG OutputStream closed
2024-11-20 20:35:27,345 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-20 20:35:27,346 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-20 20:35:27,346 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-20 20:35:27,445 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-20 20:35:27,448 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-20 20:35:27,449 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-20 20:35:27,450 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-20 20:35:27,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-20 20:35:27,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-20 20:35:27,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-20 20:35:27,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-20 20:35:27,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-20 20:35:27,454 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-20 20:35:27,454 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-20 20:35:27,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-20 20:35:27,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-20 20:35:27,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-20 20:35:27,456 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-20 20:35:27,456 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-20 20:35:27,456 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-20 20:35:27,457 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-20 20:35:27,460 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-20 20:35:27,460 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-20 20:35:27,461 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-20 20:35:27,462 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-20T20:35:27,783 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c
2024-11-20 20:35:27,787 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-20 20:35:27,788 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
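
Everything up to this point is Log4j 2 replaying the test jar's log4j2.properties (the jar URI is in the "Reconfiguration complete" line above). Read back from the logged LoggerConfig, RootLogger and PatternLayout builders, that file plausibly looks like the sketch below. This is a reconstruction from the builder calls only, not a copy of the actual file, and the property labels (console, hadoop, ...) are invented for illustration:

    # Reconstructed sketch of the bundled log4j2.properties (labels illustrative)
    appender.console.type = HBaseTestAppender
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.maxSize = 1G
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    # levelAndRefs="INFO,Console" in the RootLogger builder is this shorthand:
    rootLogger = INFO,Console

    # One pair per LoggerConfig$Builder line above, for example:
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.directory.name = org.apache.directory
    logger.directory.level = WARN
    logger.directory.additivity = false
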
2024-11-20T20:35:27,804 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-20T20:35:27,854 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=901, ProcessCount=11, AvailableMemoryMB=2476
2024-11-20T20:35:27,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T20:35:27,881 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251, deleteOnExit=true
2024-11-20T20:35:27,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-20T20:35:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/test.cache.data in system properties and HBase conf
2024-11-20T20:35:27,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.tmp.dir in system properties and HBase conf
2024-11-20T20:35:27,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.log.dir in system properties and HBase conf
2024-11-20T20:35:27,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-20T20:35:27,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-20T20:35:27,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-20T20:35:28,007 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-20T20:35:28,132 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
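
The "Starting up minicluster" line drives everything that follows: HDFS, ZooKeeper and HBase all run embedded in the test JVM. A minimal sketch of the test-side call that produces this topology, assuming the HBase 3.x test API named in the log (HBaseTestingUtil and StartMiniClusterOption); this is an illustration, not the actual TestLogRolling code:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Same shape as the logged option: 1 master, 1 region server,
        // 2 datanodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option);  // emits "STARTING DFS" etc. as below
        try {
          // ... run test logic against the embedded cluster ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
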
2024-11-20T20:35:28,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:35:28,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:35:28,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-20T20:35:28,143 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:35:28,143 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-20T20:35:28,144 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-20T20:35:28,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:35:28,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:35:28,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-20T20:35:28,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/nfs.dump.dir in system properties and HBase conf
2024-11-20T20:35:28,149 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/java.io.tmpdir in system properties and HBase conf
2024-11-20T20:35:28,149 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:35:28,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-20T20:35:28,151 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-20T20:35:28,864 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:35:29,792 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-20T20:35:29,966 INFO [Time-limited test {}] log.Log(170): Logging initialized @3671ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-20T20:35:30,093 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:35:30,227 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:35:30,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:35:30,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:35:30,323 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:35:30,352 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:35:30,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:35:30,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:35:30,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de997b9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/java.io.tmpdir/jetty-localhost-35387-hadoop-hdfs-3_4_1-tests_jar-_-any-9394442181922544232/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:35:30,660 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:35387}
2024-11-20T20:35:30,660 INFO [Time-limited test {}] server.Server(415): Started @4366ms
2024-11-20T20:35:30,694 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:35:31,660 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:35:31,674 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:35:31,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:35:31,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:35:31,701 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:35:31,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:35:31,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:35:31,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f93babe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/java.io.tmpdir/jetty-localhost-44667-hadoop-hdfs-3_4_1-tests_jar-_-any-12465324787241826104/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:35:31,874 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:44667}
2024-11-20T20:35:31,875 INFO [Time-limited test {}] server.Server(415): Started @5581ms
2024-11-20T20:35:32,006 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:35:32,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:35:32,357 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:35:32,363 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:35:32,364 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:35:32,364 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:35:32,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:35:32,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:35:32,524 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c963ecd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/java.io.tmpdir/jetty-localhost-33597-hadoop-hdfs-3_4_1-tests_jar-_-any-4601952342091250108/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:35:32,525 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:33597}
2024-11-20T20:35:32,525 INFO [Time-limited test {}] server.Server(415): Started @6231ms
2024-11-20T20:35:32,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
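
At this point the embedded HDFS is up: the first Jetty server (localhost:35387) is the NameNode web UI, and the next two (localhost:44667, localhost:33597) are the two DataNodes requested by numDataNodes=2. HBaseTestingUtil wraps Hadoop's own mini cluster for this step; a standalone sketch of that underlying harness, assuming the stock hadoop-hdfs test API (an illustration, not the exact code path the log came from):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two datanodes, matching the logged topology.
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        dfs.waitActive();                     // wait for both datanodes to register
        FileSystem fs = dfs.getFileSystem();  // hdfs://localhost:<random port>/
        System.out.println(fs.getUri());
        dfs.shutdown();
      }
    }
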
2024-11-20T20:35:34,210 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/data/data1/current/BP-1586463130-172.17.0.2-1732134929023/current, will proceed with Du for space computation calculation,
2024-11-20T20:35:34,210 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/data/data2/current/BP-1586463130-172.17.0.2-1732134929023/current, will proceed with Du for space computation calculation,
2024-11-20T20:35:34,213 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/data/data3/current/BP-1586463130-172.17.0.2-1732134929023/current, will proceed with Du for space computation calculation,
2024-11-20T20:35:34,214 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/data/data4/current/BP-1586463130-172.17.0.2-1732134929023/current, will proceed with Du for space computation calculation,
2024-11-20T20:35:34,355 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:35:34,363 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:35:34,466 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6573b13891e1b700 with lease ID 0xb989c5896ee61116: Processing first storage report for DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956 from datanode DatanodeRegistration(127.0.0.1:46327, datanodeUuid=22152500-c199-4d70-afdd-965b92c17c61, infoPort=40601, infoSecurePort=0, ipcPort=35709, storageInfo=lv=-57;cid=testClusterID;nsid=772407360;c=1732134929023)
2024-11-20T20:35:34,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6573b13891e1b700 with lease ID 0xb989c5896ee61116: from storage DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956 node DatanodeRegistration(127.0.0.1:46327, datanodeUuid=22152500-c199-4d70-afdd-965b92c17c61, infoPort=40601, infoSecurePort=0, ipcPort=35709, storageInfo=lv=-57;cid=testClusterID;nsid=772407360;c=1732134929023), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-20T20:35:34,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77a011c8e2632a04 with lease ID 0xb989c5896ee61115: Processing first storage report for DS-37a32f44-9931-4c9e-93d6-4448f8194e57 from datanode DatanodeRegistration(127.0.0.1:42689, datanodeUuid=dc5b1e8b-d666-49ce-a9f2-894fc7b0d778, infoPort=36999, infoSecurePort=0, ipcPort=34597, storageInfo=lv=-57;cid=testClusterID;nsid=772407360;c=1732134929023)
2024-11-20T20:35:34,469 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77a011c8e2632a04 with lease ID 0xb989c5896ee61115: from storage DS-37a32f44-9931-4c9e-93d6-4448f8194e57 node DatanodeRegistration(127.0.0.1:42689, datanodeUuid=dc5b1e8b-d666-49ce-a9f2-894fc7b0d778, infoPort=36999, infoSecurePort=0, ipcPort=34597, storageInfo=lv=-57;cid=testClusterID;nsid=772407360;c=1732134929023), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:35:34,469 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6573b13891e1b700 with lease ID 0xb989c5896ee61116: Processing first storage report for DS-c7e2ee7f-a360-42b9-9e2c-0ccf11fb3056 from datanode DatanodeRegistration(127.0.0.1:46327, datanodeUuid=22152500-c199-4d70-afdd-965b92c17c61, infoPort=40601, infoSecurePort=0, ipcPort=35709, storageInfo=lv=-57;cid=testClusterID;nsid=772407360;c=1732134929023)
2024-11-20T20:35:34,469 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6573b13891e1b700 with lease ID 0xb989c5896ee61116: from storage DS-c7e2ee7f-a360-42b9-9e2c-0ccf11fb3056 node DatanodeRegistration(127.0.0.1:46327, datanodeUuid=22152500-c199-4d70-afdd-965b92c17c61, infoPort=40601, infoSecurePort=0, ipcPort=35709, storageInfo=lv=-57;cid=testClusterID;nsid=772407360;c=1732134929023), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:35:34,470 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77a011c8e2632a04 with lease ID 0xb989c5896ee61115: Processing first storage report for DS-05375134-6b24-4788-95ae-f10b6a704ce3 from datanode DatanodeRegistration(127.0.0.1:42689, datanodeUuid=dc5b1e8b-d666-49ce-a9f2-894fc7b0d778, infoPort=36999, infoSecurePort=0, ipcPort=34597, storageInfo=lv=-57;cid=testClusterID;nsid=772407360;c=1732134929023)
2024-11-20T20:35:34,470 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77a011c8e2632a04 with lease ID 0xb989c5896ee61115: from storage DS-05375134-6b24-4788-95ae-f10b6a704ce3 node DatanodeRegistration(127.0.0.1:42689, datanodeUuid=dc5b1e8b-d666-49ce-a9f2-894fc7b0d778, infoPort=36999, infoSecurePort=0, ipcPort=34597, storageInfo=lv=-57;cid=testClusterID;nsid=772407360;c=1732134929023), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:35:34,489 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c
2024-11-20T20:35:34,751 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/zookeeper_0, clientPort=59326, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-20T20:35:34,770 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59326
2024-11-20T20:35:34,783 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:35:34,786 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:35:35,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:35:35,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:35:35,734 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18 with version=8
2024-11-20T20:35:35,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase-staging
2024-11-20T20:35:35,867 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-20T20:35:36,188 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:35:36,203 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:35:36,203 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
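
MiniZooKeeperCluster has now verified the embedded ensemble with a ZooKeeper 'stat' on client port 59326, and the RecoverableZooKeeper lines below show the master and region server attaching to it. For illustration, any plain ZooKeeper client could connect to the same ensemble; a sketch using the standard ZooKeeper client API (the port is the one from this run, and the znode path is the baseZNode seen in the log):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkClientSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the embedded ensemble; events like the SyncConnected
        // notifications logged below are delivered to the watcher.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59326", 30000,
            (WatchedEvent event) -> System.out.println("event: " + event));
        // ... read znodes under /hbase once the master registers ...
        zk.close();
      }
    }
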
2024-11-20T20:35:36,210 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:35:36,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:35:36,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:35:36,411 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-20T20:35:36,493 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-20T20:35:36,506 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-20T20:35:36,511 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:35:36,544 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 77376 (auto-detected)
2024-11-20T20:35:36,545 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-20T20:35:36,569 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39077
2024-11-20T20:35:36,601 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39077 connecting to ZooKeeper ensemble=127.0.0.1:59326
2024-11-20T20:35:36,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:390770x0, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:35:36,758 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39077-0x10159f5945f0000 connected
2024-11-20T20:35:37,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:35:37,128 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:35:37,142 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:35:37,149 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18, hbase.cluster.distributed=false
2024-11-20T20:35:37,182 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:35:37,188 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39077
2024-11-20T20:35:37,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39077
2024-11-20T20:35:37,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39077
2024-11-20T20:35:37,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39077
2024-11-20T20:35:37,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39077
2024-11-20T20:35:37,361 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:35:37,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:35:37,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:35:37,364 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:35:37,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:35:37,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:35:37,368 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T20:35:37,377 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:35:37,382 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41209
2024-11-20T20:35:37,386 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41209 connecting to ZooKeeper ensemble=127.0.0.1:59326
2024-11-20T20:35:37,387 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:35:37,396 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:35:37,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412090x0, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:35:37,586 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41209-0x10159f5945f0001 connected
2024-11-20T20:35:37,586 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:35:37,592 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-20T20:35:37,602 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-20T20:35:37,609 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T20:35:37,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:35:37,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41209
2024-11-20T20:35:37,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41209
2024-11-20T20:35:37,621 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41209
2024-11-20T20:35:37,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41209
2024-11-20T20:35:37,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41209
2024-11-20T20:35:37,641 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d7d59561a64e:39077
2024-11-20T20:35:37,642 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d7d59561a64e,39077,1732134935979
2024-11-20T20:35:37,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:35:37,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:35:37,661 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d7d59561a64e,39077,1732134935979
2024-11-20T20:35:37,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-20T20:35:37,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:35:37,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:35:37,692 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T20:35:37,693 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d7d59561a64e,39077,1732134935979 from backup master directory
2024-11-20T20:35:37,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d7d59561a64e,39077,1732134935979
2024-11-20T20:35:37,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:35:37,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:35:37,709 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T20:35:37,709 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d7d59561a64e,39077,1732134935979
2024-11-20T20:35:37,711 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-20T20:35:37,713 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-20T20:35:37,786 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase.id] with ID: ca97f1e1-2cc2-4c21-8354-9e73d4efee3a
2024-11-20T20:35:37,786 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/.tmp/hbase.id
2024-11-20T20:35:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741826_1002 (size=42)
2024-11-20T20:35:37,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741826_1002 (size=42)
2024-11-20T20:35:38,211 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/.tmp/hbase.id]:[hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase.id]
2024-11-20T20:35:38,278 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:35:38,284 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-20T20:35:38,309 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 22ms.
2024-11-20T20:35:38,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:35:38,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:35:38,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:35:38,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:35:38,387 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T20:35:38,390 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-20T20:35:38,399 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:35:38,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:35:38,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:35:38,497 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store
2024-11-20T20:35:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:35:38,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:35:38,548 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-20T20:35:38,555 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:35:38,556 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T20:35:38,557 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:35:38,557 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:35:38,559 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T20:35:38,560 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
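
The table descriptor dumped twice above (once by MasterRegion, once in HRegion's "Creating {ENCODED => ...}") is dense; as a reading aid, the logged attributes of the 'info' family correspond to the descriptor-builder calls below. This is a sketch only: MasterRegion assembles the descriptor internally, and the classes shown are the ordinary public hbase-client API, not the master's private code path:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' family as logged: VERSIONS=3, ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, IN_MEMORY=true, 8 KB block size.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        // 'proc', 'rs' and 'state' differ only in the logged defaults
        // (VERSIONS=1, NONE encoding, ROW bloom, 64 KB blocks).
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
      }
    }
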
2024-11-20T20:35:38,560 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:35:38,562 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732134938556Disabling compacts and flushes for region at 1732134938556Disabling writes for close at 1732134938560 (+4 ms)Writing region close event to WAL at 1732134938560Closed at 1732134938560 2024-11-20T20:35:38,566 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/.initializing 2024-11-20T20:35:38,567 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/WALs/d7d59561a64e,39077,1732134935979 2024-11-20T20:35:38,616 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C39077%2C1732134935979, suffix=, logDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/WALs/d7d59561a64e,39077,1732134935979, archiveDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/oldWALs, maxLogs=10 2024-11-20T20:35:38,630 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39077%2C1732134935979.1732134938623 2024-11-20T20:35:38,677 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/WALs/d7d59561a64e,39077,1732134935979/d7d59561a64e%2C39077%2C1732134935979.1732134938623 2024-11-20T20:35:38,694 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40601:40601),(127.0.0.1/127.0.0.1:36999:36999)] 2024-11-20T20:35:38,705 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T20:35:38,708 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:35:38,715 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,716 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,770 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,809 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T20:35:38,815 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:38,827 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:35:38,832 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,843 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T20:35:38,844 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:38,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:35:38,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,852 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T20:35:38,852 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:38,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:35:38,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T20:35:38,861 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:38,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:35:38,863 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,867 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,868 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,876 DEBUG [master/d7d59561a64e:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,877 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,882 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T20:35:38,890 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:35:38,896 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:35:38,898 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873087, jitterRate=0.11018826067447662}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T20:35:38,908 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732134938734Initializing all the Stores at 1732134938738 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732134938739 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732134938740 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732134938740Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732134938740Cleaning up temporary data from old regions at 1732134938877 (+137 ms)Region opened successfully at 1732134938907 (+30 ms) 2024-11-20T20:35:38,912 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T20:35:38,960 DEBUG 
[master/d7d59561a64e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b582434, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0 2024-11-20T20:35:39,008 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T20:35:39,023 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T20:35:39,024 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T20:35:39,027 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T20:35:39,036 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 8 msec 2024-11-20T20:35:39,044 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-20T20:35:39,044 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T20:35:39,108 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T20:35:39,123 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T20:35:39,144 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T20:35:39,148 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T20:35:39,154 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T20:35:39,172 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T20:35:39,176 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T20:35:39,185 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T20:35:39,195 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T20:35:39,200 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T20:35:39,213 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T20:35:39,253 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T20:35:39,268 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T20:35:39,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:35:39,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:35:39,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:35:39,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:35:39,284 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d7d59561a64e,39077,1732134935979, sessionid=0x10159f5945f0000, setting cluster-up flag (Was=false) 2024-11-20T20:35:39,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:35:39,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:35:39,364 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T20:35:39,367 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,39077,1732134935979 2024-11-20T20:35:39,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:35:39,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:35:39,427 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T20:35:39,430 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,39077,1732134935979 2024-11-20T20:35:39,440 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T20:35:39,555 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T20:35:39,570 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(746): ClusterId : ca97f1e1-2cc2-4c21-8354-9e73d4efee3a 2024-11-20T20:35:39,573 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T20:35:39,576 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T20:35:39,591 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
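The StochasticLoadBalancer line above prints its effective tuning: maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000 ms, plus the full cost-function list (the trailing "sum of multiplier of cost functions = 0.0 etc." is part of the stock message). Those values come from configuration; a sketch of the corresponding keys, assuming the standard stochastic-balancer key names:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys behind the values printed by StochasticLoadBalancer above.
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    // The balancer caps its random walk at maxSteps (or runs them all when
    // runMaxSteps=true) and also stops after maxRunningTime milliseconds.
    System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
  }
}
```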
2024-11-20T20:35:39,602 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T20:35:39,602 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T20:35:39,600 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d7d59561a64e,39077,1732134935979 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T20:35:39,626 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T20:35:39,627 DEBUG [RS:0;d7d59561a64e:41209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24828fe1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0 2024-11-20T20:35:39,657 DEBUG [RS:0;d7d59561a64e:41209 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d7d59561a64e:41209 2024-11-20T20:35:39,657 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:35:39,658 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:35:39,658 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:35:39,658 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:35:39,659 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d7d59561a64e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T20:35:39,659 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:39,659 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:35:39,659 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:39,662 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T20:35:39,662 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T20:35:39,662 DEBUG [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(832): About to register with Master. 
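Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above is a dedicated per-event-type thread pool on the master or region server. Functionally it is close to a fixed-size java.util.concurrent pool with core-thread timeout; a rough analogy (the helper and names here are illustrative, not HBase's ExecutorService implementation):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class EventTypePoolSketch {
  private static final AtomicInteger SEQ = new AtomicInteger();

  // One pool per event type, sized like the corePoolSize/maxPoolSize pairs
  // in the log (e.g. MASTER_OPEN_REGION: 5/5, MASTER_SNAPSHOT_OPERATIONS: 1/1).
  static ThreadPoolExecutor newPool(String name, int size) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        size, size, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-" + SEQ.incrementAndGet()));
    pool.allowCoreThreadTimeOut(true); // idle workers may exit
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = newPool("MASTER_OPEN_REGION", 5);
    openRegion.execute(() -> System.out.println("open-region work runs here"));
    openRegion.shutdown();
  }
}
```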
2024-11-20T20:35:39,666 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(2659): reportForDuty to master=d7d59561a64e,39077,1732134935979 with port=41209, startcode=1732134937306 2024-11-20T20:35:39,685 DEBUG [RS:0;d7d59561a64e:41209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T20:35:39,700 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:35:39,701 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T20:35:39,722 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732134969722 2024-11-20T20:35:39,724 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T20:35:39,717 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:39,726 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T20:35:39,726 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T20:35:39,737 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T20:35:39,737 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T20:35:39,740 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] 
cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T20:35:39,741 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T20:35:39,742 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:39,767 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T20:35:39,769 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T20:35:39,769 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T20:35:39,778 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T20:35:39,779 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T20:35:39,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:35:39,787 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732134939781,5,FailOnTimeoutGroup] 2024-11-20T20:35:39,787 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732134939787,5,FailOnTimeoutGroup] 2024-11-20T20:35:39,788 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:39,792 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T20:35:39,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:35:39,793 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:39,801 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
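The ChoreService lines above register periodic maintenance tasks: LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms. The scheduling shape is plain fixed-period execution; a ScheduledExecutorService analogy (an illustration of the pattern only, not HBase's ChoreService):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LogsCleanerChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    // "Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS"
    // amounts to: run the cleaner chain over oldWALs every 10 minutes.
    chores.scheduleAtFixedRate(
        () -> System.out.println("scan oldWALs, delete files past their TTL"),
        0, 600_000, TimeUnit.MILLISECONDS);
  }
}
```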
2024-11-20T20:35:39,808 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T20:35:39,809 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18 2024-11-20T20:35:39,846 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40789, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T20:35:39,856 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39077 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d7d59561a64e,41209,1732134937306 2024-11-20T20:35:39,859 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39077 {}] master.ServerManager(517): Registering regionserver=d7d59561a64e,41209,1732134937306 2024-11-20T20:35:39,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741832_1008 (size=32) 2024-11-20T20:35:39,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741832_1008 (size=32) 2024-11-20T20:35:39,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:35:39,884 DEBUG [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18 2024-11-20T20:35:39,884 DEBUG [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1440): Config 
from master: fs.defaultFS=hdfs://localhost:42437 2024-11-20T20:35:39,885 DEBUG [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T20:35:39,888 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T20:35:39,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T20:35:39,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:39,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:35:39,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T20:35:39,896 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T20:35:39,896 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:39,898 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:35:39,898 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T20:35:39,902 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T20:35:39,902 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:39,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:35:39,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:35:39,904 DEBUG [RS:0;d7d59561a64e:41209 {}] zookeeper.ZKUtil(111): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d7d59561a64e,41209,1732134937306 2024-11-20T20:35:39,904 WARN [RS:0;d7d59561a64e:41209 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
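The CompactionConfiguration line printed for every store above repeats the same knobs: at least 3 and at most 10 files per minor compaction, ratio 1.2 (5.0 off-peak), a 2684354560-byte (2.5 GB) throttle point, and a 604800000 ms (7-day) major compaction period with 0.5 jitter. A sketch of the config keys behind those numbers, assuming the stock key names and defaults:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults matching the CompactionConfiguration values logged per store.
    System.out.println("minFilesToCompact = "
        + conf.getInt("hbase.hstore.compaction.min", 3));
    System.out.println("maxFilesToCompact = "
        + conf.getInt("hbase.hstore.compaction.max", 10));
    System.out.println("ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
    System.out.println("off-peak ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f));
    // 2684354560 bytes = 2.5 GB boundary between the small and large
    // compaction thread pools.
    System.out.println("throttle point = "
        + conf.getLong("hbase.regionserver.thread.compaction.throttle",
            2684354560L));
    // 604800000 ms = 7 days between major compactions, randomized by the
    // 0.5 jitter so stores do not major-compact in lockstep.
    System.out.println("major period = "
        + conf.getLong("hbase.hregion.majorcompaction", 604800000L));
  }
}
```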
2024-11-20T20:35:39,904 INFO [RS:0;d7d59561a64e:41209 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:35:39,904 DEBUG [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306 2024-11-20T20:35:39,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T20:35:39,907 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d7d59561a64e,41209,1732134937306] 2024-11-20T20:35:39,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T20:35:39,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:35:39,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:35:39,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T20:35:39,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740 2024-11-20T20:35:39,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740 2024-11-20T20:35:39,921 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T20:35:39,921 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T20:35:39,922 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
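The FlushLargeStoresPolicy fallback logged above (and earlier for master:store) is straightforward arithmetic: with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family lower bound is the region's memstore flush size divided by its family count. hbase:meta flushes at 64 MB across four families, giving the 16.0 M here; master:store flushes at 128 MB across four families, giving the 32.0 M seen earlier and the flushSizeLowerBound=33554432 printed when that region opened. A sketch of the computation:

```java
public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // hbase:meta: 64 MB memstore flush size, 4 families -> 16 MB lower bound.
    System.out.println(lowerBound(64L << 20, 4) >> 20);  // 16
    // master:store: 128 MB flush size, 4 families -> 32 MB lower bound,
    // matching flushSizeLowerBound=33554432 in the region-open line.
    System.out.println(lowerBound(128L << 20, 4));       // 33554432
  }

  // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound
  // is absent: region memstore flush size / number of column families.
  static long lowerBound(long memstoreFlushSize, int families) {
    return memstoreFlushSize / families;
  }
}
```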
2024-11-20T20:35:39,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T20:35:39,951 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T20:35:39,953 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:35:39,954 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711353, jitterRate=-0.09546907246112823}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T20:35:39,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732134939884Initializing all the Stores at 1732134939887 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732134939887Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732134939887Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732134939887Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732134939887Cleaning up temporary data from old regions at 1732134939921 (+34 ms)Region opened successfully at 1732134939962 (+41 ms) 2024-11-20T20:35:39,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T20:35:39,963 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T20:35:39,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T20:35:39,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T20:35:39,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T20:35:39,966 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T20:35:39,966 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 
1588230740: Waiting for close lock at 1732134939963Disabling compacts and flushes for region at 1732134939963Disabling writes for close at 1732134939963Writing region close event to WAL at 1732134939965 (+2 ms)Closed at 1732134939965 2024-11-20T20:35:39,970 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:35:39,971 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T20:35:39,978 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T20:35:39,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T20:35:39,986 INFO [RS:0;d7d59561a64e:41209 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T20:35:39,986 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:39,989 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T20:35:40,005 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T20:35:40,007 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
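The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false) follows from two fractions: the global limit defaults to a share of the heap (hbase.regionserver.global.memstore.size, 0.4 by default) and the low-water mark is that limit scaled by hbase.regionserver.global.memstore.size.lower.limit (0.95 by default; 836/880 = 0.95 here). A sketch of the arithmetic, with the heap size chosen to reproduce the logged values:

```java
public class MemStoreLimitSketch {
  public static void main(String[] args) {
    // A ~2.2 GB heap reproduces the logged figures; a real server would use
    // Runtime.getRuntime().maxMemory() instead of a constant.
    long heapBytes = 2200L << 20;
    double globalFraction = 0.4; // hbase.regionserver.global.memstore.size
    double lowerLimit = 0.95;    // ...global.memstore.size.lower.limit
    long limit = Math.round(heapBytes * globalFraction);
    long lowMark = Math.round(limit * lowerLimit);
    // Prints "880 M / 836 M", matching the MemStoreFlusher line above.
    System.out.println((limit >> 20) + " M / " + (lowMark >> 20) + " M");
  }
}
```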
2024-11-20T20:35:40,007 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T20:35:40,008 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,008 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,008 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,008 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,008 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,009 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:35:40,009 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,009 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,009 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,010 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,010 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,010 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:35:40,010 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T20:35:40,010 DEBUG [RS:0;d7d59561a64e:41209 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T20:35:40,011 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T20:35:40,014 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is 
enabled. 2024-11-20T20:35:40,016 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:40,017 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:40,017 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:40,017 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:40,017 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,41209,1732134937306-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T20:35:40,041 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T20:35:40,043 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,41209,1732134937306-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:40,043 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:40,043 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.Replication(171): d7d59561a64e,41209,1732134937306 started 2024-11-20T20:35:40,064 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:35:40,064 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1482): Serving as d7d59561a64e,41209,1732134937306, RpcServer on d7d59561a64e/172.17.0.2:41209, sessionid=0x10159f5945f0001 2024-11-20T20:35:40,065 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T20:35:40,066 DEBUG [RS:0;d7d59561a64e:41209 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d7d59561a64e,41209,1732134937306 2024-11-20T20:35:40,066 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,41209,1732134937306' 2024-11-20T20:35:40,066 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T20:35:40,067 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T20:35:40,068 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T20:35:40,068 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T20:35:40,068 DEBUG [RS:0;d7d59561a64e:41209 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d7d59561a64e,41209,1732134937306 2024-11-20T20:35:40,068 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,41209,1732134937306' 2024-11-20T20:35:40,069 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 
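The procedure-member lines above boil down to ZooKeeper watches: each region server checks /hbase/flush-table-proc/abort for aborted procedures and watches /hbase/flush-table-proc/acquired for new ones. A minimal sketch of that pattern with the plain ZooKeeper client (quorum address taken from the log; error handling omitted):

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureMemberSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59326", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // One-shot child watch: a new znode under .../acquired fires a
    // NodeChildrenChanged event like the ZKWatcher lines above.
    List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired",
        event -> System.out.println("new procedure: " + event.getPath()));
    System.out.println("outstanding procedures: " + acquired);
    zk.close();
  }
}
```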
2024-11-20T20:35:40,069 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-20T20:35:40,070 DEBUG [RS:0;d7d59561a64e:41209 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-20T20:35:40,070 INFO [RS:0;d7d59561a64e:41209 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-20T20:35:40,070 INFO [RS:0;d7d59561a64e:41209 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-20T20:35:40,162 WARN [d7d59561a64e:39077 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-20T20:35:40,183 INFO [RS:0;d7d59561a64e:41209 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C41209%2C1732134937306, suffix=, logDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306, archiveDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs, maxLogs=32
2024-11-20T20:35:40,187 INFO [RS:0;d7d59561a64e:41209 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732134940186
2024-11-20T20:35:40,198 INFO [RS:0;d7d59561a64e:41209 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134940186
2024-11-20T20:35:40,204 DEBUG [RS:0;d7d59561a64e:41209 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40601:40601),(127.0.0.1/127.0.0.1:36999:36999)]
2024-11-20T20:35:40,415 DEBUG [d7d59561a64e:39077 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-20T20:35:40,429 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d7d59561a64e,41209,1732134937306
2024-11-20T20:35:40,437 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,41209,1732134937306, state=OPENING
2024-11-20T20:35:40,468 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-20T20:35:40,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:35:40,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:35:40,481 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:35:40,481 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:35:40,484 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-20T20:35:40,486 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,41209,1732134937306}]
2024-11-20T20:35:40,666 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-20T20:35:40,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59285, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-20T20:35:40,682 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-20T20:35:40,682 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:35:40,686 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C41209%2C1732134937306.meta, suffix=.meta, logDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306, archiveDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs, maxLogs=32
2024-11-20T20:35:40,688 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.meta.1732134940688.meta
2024-11-20T20:35:40,699 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.meta.1732134940688.meta
2024-11-20T20:35:40,708 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40601:40601)]
2024-11-20T20:35:40,713 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:35:40,716 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-20T20:35:40,719 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-20T20:35:40,726 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
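Both "WAL configuration" entries above report blocksize=256 MB, rollsize=128 MB, and maxLogs=32. Assuming the standard HBase key names (stated here as an assumption rather than read from this run's config files), the roll size is derived as block size times a roll multiplier, so settings that would produce exactly these numbers look like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Assumed standard keys: rollsize = blocksize * multiplier,
    // i.e. 256 MB * 0.5 = 128 MB as reported in the log above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);

    // Upper bound on un-archived WAL files ("maxLogs=32").
    conf.setInt("hbase.regionserver.maxlogs", 32);

    long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", -1);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    System.out.println("roll size = " + (long) (blocksize * multiplier)); // 134217728
  }
}
```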
2024-11-20T20:35:40,733 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-20T20:35:40,734 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:35:40,735 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-20T20:35:40,735 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-20T20:35:40,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-20T20:35:40,752 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-20T20:35:40,753 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:35:40,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:35:40,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-20T20:35:40,757 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-20T20:35:40,757 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:35:40,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:35:40,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-20T20:35:40,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-20T20:35:40,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:35:40,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:35:40,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-20T20:35:40,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-20T20:35:40,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:35:40,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
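Each store open above prints the same CompactionConfiguration summary: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000, minCompactSize:128 MB. The run is simply using defaults, but if one wanted to set those values explicitly, the usual keys are the following (key names are an assumption about the standard HBase configuration, not read from this run):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Assumed standard keys matching the CompactionConfiguration summary above.
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio 5.000000

    // Files at or below this size are always compaction candidates;
    // 128 MB matches "minCompactSize:128 MB" in the log.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
  }
}
```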
2024-11-20T20:35:40,764 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-20T20:35:40,766 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740
2024-11-20T20:35:40,768 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740
2024-11-20T20:35:40,771 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-20T20:35:40,771 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-20T20:35:40,772 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-20T20:35:40,775 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-20T20:35:40,777 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763671, jitterRate=-0.02894313633441925}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-20T20:35:40,778 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-20T20:35:40,780 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732134940736Writing region info on filesystem at 1732134940736Initializing all the Stores at 1732134940744 (+8 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732134940744Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732134940750 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732134940750Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732134940750Cleaning up temporary data from old regions at 1732134940771 (+21 ms)Running coprocessor post-open hooks at 1732134940778 (+7 ms)Region opened successfully at 1732134940779 (+1 ms)
2024-11-20T20:35:40,788 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732134940657
2024-11-20T20:35:40,807 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-20T20:35:40,807 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-20T20:35:40,814 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,41209,1732134937306
2024-11-20T20:35:40,833 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,41209,1732134937306, state=OPEN
2024-11-20T20:35:41,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-20T20:35:41,065 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:35:41,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-20T20:35:41,068 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:35:41,069 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d7d59561a64e,41209,1732134937306
2024-11-20T20:35:41,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-20T20:35:41,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,41209,1732134937306 in 583 msec
2024-11-20T20:35:41,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-20T20:35:41,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.1100 sec
2024-11-20T20:35:41,101 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-20T20:35:41,101 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-20T20:35:41,128 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-20T20:35:41,129 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,41209,1732134937306, seqNum=-1]
2024-11-20T20:35:41,151 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-20T20:35:41,154 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48587, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-20T20:35:41,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6910 sec
2024-11-20T20:35:41,180 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732134941180, completionTime=-1
2024-11-20T20:35:41,184 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-20T20:35:41,184 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster...
2024-11-20T20:35:41,223 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1
2024-11-20T20:35:41,223 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732135001223
2024-11-20T20:35:41,223 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732135061223
2024-11-20T20:35:41,224 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 39 msec
2024-11-20T20:35:41,229 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39077,1732134935979-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:35:41,229 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39077,1732134935979-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:35:41,229 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39077,1732134935979-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:35:41,232 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d7d59561a64e:39077, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:35:41,237 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-20T20:35:41,240 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-20T20:35:41,245 DEBUG [master/d7d59561a64e:0.Chore.1 {}] janitor.CatalogJanitor(180): 
2024-11-20T20:35:41,278 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.569sec
2024-11-20T20:35:41,280 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-20T20:35:41,281 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-20T20:35:41,283 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-20T20:35:41,284 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-20T20:35:41,284 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-20T20:35:41,285 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39077,1732134935979-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T20:35:41,286 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39077,1732134935979-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-20T20:35:41,299 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-20T20:35:41,300 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-20T20:35:41,300 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39077,1732134935979-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
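With the master initialized, the next entries show the test client building a connection: fetching the cluster id from the registry, then the meta region location, then per-service RPC stubs. A minimal client-side equivalent is sketched below; the ZooKeeper quorum and port are taken from the addresses in this log, everything else is assumed defaults.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The log shows the mini cluster's ZooKeeper quorum at 127.0.0.1:59326.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "59326");

    // createConnection performs the registry handshake seen below
    // (cluster id fetch, meta region location, service stubs).
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      System.out.println("connected, meta = " + meta.getName());
    }
  }
}
```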
2024-11-20T20:35:41,433 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4c629c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:35:41,437 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-11-20T20:35:41,439 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-11-20T20:35:41,443 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d7d59561a64e,39077,-1 for getting cluster id
2024-11-20T20:35:41,447 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-20T20:35:41,468 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ca97f1e1-2cc2-4c21-8354-9e73d4efee3a'
2024-11-20T20:35:41,498 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-20T20:35:41,498 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ca97f1e1-2cc2-4c21-8354-9e73d4efee3a"
2024-11-20T20:35:41,502 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b860e5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:35:41,502 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d7d59561a64e,39077,-1]
2024-11-20T20:35:41,506 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-20T20:35:41,508 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:35:41,515 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42252, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-20T20:35:41,520 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5286c427, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:35:41,521 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-20T20:35:41,542 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,41209,1732134937306, seqNum=-1]
2024-11-20T20:35:41,544 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-20T20:35:41,559 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37486, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-20T20:35:41,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d7d59561a64e,39077,1732134935979
2024-11-20T20:35:41,592 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:35:41,604 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-20T20:35:41,623 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-20T20:35:41,632 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is d7d59561a64e,39077,1732134935979
2024-11-20T20:35:41,636 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@50099097
2024-11-20T20:35:41,637 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-20T20:35:41,649 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42254, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-20T20:35:41,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39077 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-20T20:35:41,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39077 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
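The two TableDescriptorChecker warnings above fire because the test deliberately creates its table with a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) to force frequent flushes and log rolls. A sketch of the kind of descriptor that trips both checks, using the public Admin/TableDescriptorBuilder API (the actual test code may differ):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallTableSketch {
  static void createTinyTable(Admin admin) throws java.io.IOException {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)     // triggers the MAX_FILESIZE warning above
        .setMemStoreFlushSize(8192L) // triggers the MEMSTORE_FLUSHSIZE warning
        .build();
    admin.createTable(td); // stores the pid=4 CreateTableProcedure seen below
  }
}
```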
2024-11-20T20:35:41,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39077 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T20:35:41,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39077 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling
2024-11-20T20:35:41,672 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-11-20T20:35:41,675 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:35:41,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39077 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4
2024-11-20T20:35:41,677 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-20T20:35:41,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39077 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T20:35:41,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741835_1011 (size=389)
2024-11-20T20:35:41,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741835_1011 (size=389)
2024-11-20T20:35:41,744 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 432451b2c58c0e3e3b8fe124f450280f, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18
2024-11-20T20:35:41,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741836_1012 (size=72)
2024-11-20T20:35:41,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741836_1012 (size=72)
2024-11-20T20:35:41,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:35:41,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 432451b2c58c0e3e3b8fe124f450280f, disabling compactions & flushes
2024-11-20T20:35:41,762 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:35:41,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:35:41,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f. after waiting 0 ms
2024-11-20T20:35:41,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:35:41,762 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:35:41,763 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 432451b2c58c0e3e3b8fe124f450280f: Waiting for close lock at 1732134941762Disabling compacts and flushes for region at 1732134941762Disabling writes for close at 1732134941762Writing region close event to WAL at 1732134941762Closed at 1732134941762
2024-11-20T20:35:41,777 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-11-20T20:35:41,784 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732134941777"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732134941777"}]},"ts":"1732134941777"}
2024-11-20T20:35:41,809 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-11-20T20:35:41,811 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-20T20:35:41,815 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732134941812"}]},"ts":"1732134941812"}
2024-11-20T20:35:41,823 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta
2024-11-20T20:35:41,826 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=432451b2c58c0e3e3b8fe124f450280f, ASSIGN}]
2024-11-20T20:35:41,836 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=432451b2c58c0e3e3b8fe124f450280f, ASSIGN
2024-11-20T20:35:41,845 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=432451b2c58c0e3e3b8fe124f450280f, ASSIGN; state=OFFLINE, location=d7d59561a64e,41209,1732134937306; forceNewPlan=false, retain=false
2024-11-20T20:35:41,997 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=432451b2c58c0e3e3b8fe124f450280f, regionState=OPENING, regionLocation=d7d59561a64e,41209,1732134937306
2024-11-20T20:35:42,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=432451b2c58c0e3e3b8fe124f450280f, ASSIGN because future has completed
2024-11-20T20:35:42,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 432451b2c58c0e3e3b8fe124f450280f, server=d7d59561a64e,41209,1732134937306}]
2024-11-20T20:35:42,232 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:35:42,233 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 432451b2c58c0e3e3b8fe124f450280f, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:35:42,234 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,234 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:35:42,234 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,234 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,260 INFO [StoreOpener-432451b2c58c0e3e3b8fe124f450280f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,281 INFO [StoreOpener-432451b2c58c0e3e3b8fe124f450280f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 432451b2c58c0e3e3b8fe124f450280f columnFamilyName info
2024-11-20T20:35:42,281 DEBUG [StoreOpener-432451b2c58c0e3e3b8fe124f450280f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:35:42,290 INFO [StoreOpener-432451b2c58c0e3e3b8fe124f450280f-1 {}] regionserver.HStore(327): Store=432451b2c58c0e3e3b8fe124f450280f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:35:42,291 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,304 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,313 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,319 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,319 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,332 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,357 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T20:35:42,358 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 432451b2c58c0e3e3b8fe124f450280f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782649, jitterRate=-0.004811093211174011}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-20T20:35:42,358 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:35:42,360 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 432451b2c58c0e3e3b8fe124f450280f: Running coprocessor pre-open hook at 1732134942234Writing region info on filesystem at 1732134942234Initializing all the Stores at 1732134942252 (+18 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732134942252Cleaning up temporary data from old regions at 1732134942319 (+67 ms)Running coprocessor post-open hooks at 1732134942358 (+39 ms)Region opened successfully at 1732134942360 (+2 ms)
2024-11-20T20:35:42,369 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f., pid=6, masterSystemTime=1732134942175
2024-11-20T20:35:42,380 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:35:42,380 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:35:42,382 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=432451b2c58c0e3e3b8fe124f450280f, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,41209,1732134937306
2024-11-20T20:35:42,408 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39077 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=d7d59561a64e,41209,1732134937306, table=TestLogRolling-testSlowSyncLogRolling, region=432451b2c58c0e3e3b8fe124f450280f. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
2024-11-20T20:35:42,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 432451b2c58c0e3e3b8fe124f450280f, server=d7d59561a64e,41209,1732134937306 because future has completed
2024-11-20T20:35:42,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-20T20:35:42,433 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 432451b2c58c0e3e3b8fe124f450280f, server=d7d59561a64e,41209,1732134937306 in 411 msec
2024-11-20T20:35:42,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-20T20:35:42,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=432451b2c58c0e3e3b8fe124f450280f, ASSIGN in 607 msec
2024-11-20T20:35:42,448 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-20T20:35:42,449 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732134942448"}]},"ts":"1732134942448"}
2024-11-20T20:35:42,455 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta
2024-11-20T20:35:42,458 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-20T20:35:42,477 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 796 msec
2024-11-20T20:35:46,250 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-20T20:35:46,427 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-20T20:35:46,433 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling'
2024-11-20T20:35:46,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-20T20:35:46,487 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-20T20:35:46,497 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling
2024-11-20T20:35:46,497 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer
2024-11-20T20:35:46,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:35:46,499 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-20T20:35:46,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-20T20:35:46,499 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-20T20:35:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39077 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T20:35:51,777 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed
2024-11-20T20:35:51,780 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100
2024-11-20T20:35:51,789 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling
2024-11-20T20:35:51,790 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
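From here the test exercises log rolling: the entries below show a WAL roll, puts against row0001 in family info accumulating ~7.36 KB in the memstore, and the resulting flush to an HFile. The client-side calls would look roughly like the sketch below; the qualifier and value size are illustrative, and the flush in the log is memstore-size-triggered rather than an explicit Admin.flush.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAndRollSketch {
  static void writeAndFlush(Connection conn) throws java.io.IOException {
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Table table = conn.getTable(name)) {
      // "row0001" and family "info" appear in the flush entries below;
      // the qualifier and payload are assumptions for illustration.
      Put put = new Put(Bytes.toBytes("row0001"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]);
      table.put(put);
    }
    try (Admin admin = conn.getAdmin()) {
      // An explicit roll, as in the "Rolled WAL" entries below, would be:
      // admin.rollWALWriter(serverName);  // ServerName of the region server
      admin.flush(name); // explicit flush; the one logged below is size-triggered
    }
  }
}
```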
2024-11-20T20:35:51,791 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732134951791
2024-11-20T20:35:51,801 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:35:51,802 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:35:51,802 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:35:51,802 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:35:51,802 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:35:51,803 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134940186 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134951791
2024-11-20T20:35:51,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741833_1009 (size=451)
2024-11-20T20:35:51,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741833_1009 (size=451)
2024-11-20T20:35:51,812 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134940186 to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs/d7d59561a64e%2C41209%2C1732134937306.1732134940186
2024-11-20T20:35:51,813 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40601:40601)]
2024-11-20T20:35:51,826 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f., hostname=d7d59561a64e,41209,1732134937306, seqNum=2]
2024-11-20T20:36:03,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41209 {}] regionserver.HRegion(8855): Flush requested on 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:36:03,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 432451b2c58c0e3e3b8fe124f450280f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T20:36:03,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/9e0ee85716294d99bc4afc832906038f is 1080, key is row0001/info:/1732134951831/Put/seqid=0
2024-11-20T20:36:03,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741838_1014 (size=12509)
2024-11-20T20:36:03,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741838_1014 (size=12509)
2024-11-20T20:36:03,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/9e0ee85716294d99bc4afc832906038f
2024-11-20T20:36:03,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/9e0ee85716294d99bc4afc832906038f as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/9e0ee85716294d99bc4afc832906038f
2024-11-20T20:36:03,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/9e0ee85716294d99bc4afc832906038f, entries=7, sequenceid=11, filesize=12.2 K
2024-11-20T20:36:04,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 432451b2c58c0e3e3b8fe124f450280f in 134ms, sequenceid=11, compaction requested=false
2024-11-20T20:36:04,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 432451b2c58c0e3e3b8fe124f450280f:
2024-11-20T20:36:04,506 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-20T20:36:11,893 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732134971893
2024-11-20T20:36:12,103 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:12,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:12,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:12,103 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:12,103 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:12,103 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:12,104 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134951791 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134971893
2024-11-20T20:36:12,105 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40601:40601),(127.0.0.1/127.0.0.1:36999:36999)]
2024-11-20T20:36:12,105 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134951791 is not closed yet, will try archiving it next time
2024-11-20T20:36:12,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741837_1013 (size=12399)
2024-11-20T20:36:12,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741837_1013 (size=12399)
2024-11-20T20:36:12,308 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:14,513 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:16,717 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:18,921 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41209 {}] regionserver.HRegion(8855): Flush requested on 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:36:18,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 432451b2c58c0e3e3b8fe124f450280f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T20:36:19,123 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:19,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/14469cef47454f45a6222a7e7a0384c2 is 1080, key is row0008/info:/1732134965872/Put/seqid=0
2024-11-20T20:36:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741840_1016 (size=12509)
2024-11-20T20:36:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741840_1016 (size=12509)
2024-11-20T20:36:19,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/14469cef47454f45a6222a7e7a0384c2
2024-11-20T20:36:19,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/14469cef47454f45a6222a7e7a0384c2 as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/14469cef47454f45a6222a7e7a0384c2
2024-11-20T20:36:19,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/14469cef47454f45a6222a7e7a0384c2, entries=7, sequenceid=21, filesize=12.2 K
2024-11-20T20:36:19,379 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:19,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 432451b2c58c0e3e3b8fe124f450280f in 458ms, sequenceid=21, compaction requested=false
2024-11-20T20:36:19,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 432451b2c58c0e3e3b8fe124f450280f:
2024-11-20T20:36:19,379 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K
2024-11-20T20:36:19,379 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:36:19,380 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/9e0ee85716294d99bc4afc832906038f because midkey is the same as first or last row
2024-11-20T20:36:21,126 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:21,352 INFO [master/d7d59561a64e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-20T20:36:21,352 INFO [master/d7d59561a64e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
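The three split-policy lines just above encode a two-stage decision: the size test passes (sumSize=24.4 K exceeds sizeToCheck=16.0 K), yet no split happens, because the midkey of the biggest store file equals its first or last row key, which would leave one daughter region empty. A simplified, self-contained model of that decision (illustrative only, not HBase's implementation; the row keys in main are made up to mirror the log's row00xx keys):

    import java.util.Optional;

    public class SplitDecisionSketch {
      // Size gate, as in the ConstantSizeRegionSplitPolicy line: split once the
      // summed store size crosses the configured threshold (16.0 K in this log).
      static boolean sizeWantsSplit(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;
      }

      // Split-point gate, as in the StoreUtils line: a usable split point must
      // differ from both the first and last key of the biggest file.
      static Optional<String> splitPoint(String firstKey, String midKey, String lastKey) {
        if (midKey.equals(firstKey) || midKey.equals(lastKey)) {
          return Optional.empty(); // "cannot split ... midkey is the same as first or last row"
        }
        return Optional.of(midKey);
      }

      public static void main(String[] args) {
        System.out.println("size check wants split: " + sizeWantsSplit(24_986, 16_384));
        System.out.println("split point: " + splitPoint("row0001", "row0001", "row0014"));
      }
    }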
2024-11-20T20:36:23,331 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:23,333 WARN [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:23,334 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C41209%2C1732134937306:(num 1732134971893) roll requested
2024-11-20T20:36:23,335 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732134983335
2024-11-20T20:36:23,549 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK], DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK]]
2024-11-20T20:36:23,550 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:23,550 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:23,550 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:23,550 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:23,550 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:23,551 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134971893 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134983335
2024-11-20T20:36:23,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741839_1015 (size=7739)
2024-11-20T20:36:23,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741839_1015 (size=7739)
2024-11-20T20:36:23,563 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40601:40601)]
2024-11-20T20:36:23,563 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134971893 is not closed yet, will try archiving it next time
2024-11-20T20:36:23,563 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134951791 to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs/d7d59561a64e%2C41209%2C1732134937306.1732134951791
2024-11-20T20:36:25,539 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:27,234 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 432451b2c58c0e3e3b8fe124f450280f, had cached 0 bytes from a total of 25018
2024-11-20T20:36:27,746 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:29,953 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:32,156 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:34,159 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-11-20T20:36:34,159 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732134994159
2024-11-20T20:36:34,507 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
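Two distinct slow-sync roll triggers appear in this log: a count-based one above (AbstractFSWAL(2201): count=8 over threshold=5) and a time-based one below (AbstractFSWAL(1374): a single sync over threshold=5000 ms). A compact, illustrative model of both triggers follows; the two roll thresholds are taken from the log, while the 100 ms mark for counting a sync as "slow" is an assumed constant, and the reset behavior is simplified relative to HBase's real accounting:

    public class SlowSyncRollSketch {
      static final long SLOW_SYNC_MS = 100;       // assumed: what counts as a "slow" sync
      static final long ROLL_ON_SYNC_MS = 5_000;  // "threshold=5000 ms" in the log
      static final int  ROLL_ON_COUNT  = 5;       // "threshold=5" in the log

      private int slowSyncCount = 0;

      /** Returns true when the WAL should be rolled onto a fresh file. */
      boolean recordSync(long syncCostMs) {
        if (syncCostMs > ROLL_ON_SYNC_MS) {
          slowSyncCount = 0;
          return true;                            // one catastrophic sync is enough
        }
        if (syncCostMs > SLOW_SYNC_MS && ++slowSyncCount > ROLL_ON_COUNT) {
          slowSyncCount = 0;
          return true;                            // too many merely-slow syncs in a row
        }
        return false;
      }

      public static void main(String[] args) {
        SlowSyncRollSketch wal = new SlowSyncRollSketch();
        // Sync costs as they appear in the log: a run of ~200 ms syncs, then a 5010 ms one.
        long[] costs = {207, 201, 201, 201, 200, 201, 201, 211, 5010};
        for (long c : costs) {
          if (wal.recordSync(c)) {
            System.out.println("roll requested after sync of " + c + " ms");
          }
        }
      }
    }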
2024-11-20T20:36:39,173 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:39,175 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:39,176 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C41209%2C1732134937306:(num 1732134994159) roll requested
2024-11-20T20:36:39,176 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:39,176 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:39,176 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:39,176 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:39,176 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:39,177 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134983335 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134994159
2024-11-20T20:36:39,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741841_1017 (size=4753)
2024-11-20T20:36:39,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741841_1017 (size=4753)
2024-11-20T20:36:39,185 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40601:40601)]
2024-11-20T20:36:39,186 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134983335 is not closed yet, will try archiving it next time
2024-11-20T20:36:39,186 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732134999186
2024-11-20T20:36:44,191 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:44,192 WARN [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41209 {}] regionserver.HRegion(8855): Flush requested on 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:36:44,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 432451b2c58c0e3e3b8fe124f450280f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T20:36:44,201 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:44,201 WARN [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:46,194 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-11-20T20:36:49,197 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:49,197 WARN [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:49,198 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:49,198 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:49,199 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:49,199 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:49,199 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:49,200 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134994159 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134999186
2024-11-20T20:36:49,202 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40601:40601)]
2024-11-20T20:36:49,203 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134994159 is not closed yet, will try archiving it next time
2024-11-20T20:36:49,203 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C41209%2C1732134937306:(num 1732134999186) roll requested
2024-11-20T20:36:49,204 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732135009203
2024-11-20T20:36:49,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741842_1018 (size=1569)
2024-11-20T20:36:49,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741842_1018 (size=1569)
2024-11-20T20:36:49,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/f39b23eaff2c4f6fa039f2ba3e96035c is 1080, key is row0015/info:/1732134980924/Put/seqid=0
2024-11-20T20:36:49,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741844_1020 (size=12509)
2024-11-20T20:36:49,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741844_1020 (size=12509)
2024-11-20T20:36:49,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/f39b23eaff2c4f6fa039f2ba3e96035c
2024-11-20T20:36:49,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/f39b23eaff2c4f6fa039f2ba3e96035c as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f39b23eaff2c4f6fa039f2ba3e96035c
2024-11-20T20:36:49,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f39b23eaff2c4f6fa039f2ba3e96035c, entries=7, sequenceid=31, filesize=12.2 K
2024-11-20T20:36:54,214 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5004 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:54,214 WARN [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5004 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:54,239 INFO [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:54,240 WARN [FSHLog-0-hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18-prefix:d7d59561a64e,41209,1732134937306 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42689,DS-37a32f44-9931-4c9e-93d6-4448f8194e57,DISK], DatanodeInfoWithStorage[127.0.0.1:46327,DS-dd9bf658-eccc-4cb9-b0fe-903b6f0bd956,DISK]]
2024-11-20T20:36:54,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 432451b2c58c0e3e3b8fe124f450280f in 10047ms, sequenceid=31, compaction requested=true
2024-11-20T20:36:54,240 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 432451b2c58c0e3e3b8fe124f450280f:
2024-11-20T20:36:54,240 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,240 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K
2024-11-20T20:36:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:36:54,240 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/9e0ee85716294d99bc4afc832906038f because midkey is the same as first or last row
2024-11-20T20:36:54,241 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,241 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134999186 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732135009203
2024-11-20T20:36:54,242 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36999:36999),(127.0.0.1/127.0.0.1:40601:40601)]
2024-11-20T20:36:54,243 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134999186 is not closed yet, will try archiving it next time
2024-11-20T20:36:54,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 432451b2c58c0e3e3b8fe124f450280f:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:36:54,243 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134971893 to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs/d7d59561a64e%2C41209%2C1732134937306.1732134971893
2024-11-20T20:36:54,243 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C41209%2C1732134937306:(num 1732135014243) roll requested
2024-11-20T20:36:54,244 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732135014243
2024-11-20T20:36:54,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741843_1019 (size=438)
2024-11-20T20:36:54,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741843_1019 (size=438)
2024-11-20T20:36:54,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:36:54,247 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134983335 to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs/d7d59561a64e%2C41209%2C1732134937306.1732134983335
2024-11-20T20:36:54,248 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:36:54,251 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134994159 to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs/d7d59561a64e%2C41209%2C1732134937306.1732134994159
2024-11-20T20:36:54,251 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:36:54,253 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732134999186 to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs/d7d59561a64e%2C41209%2C1732134937306.1732134999186
2024-11-20T20:36:54,253 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.HStore(1541): 432451b2c58c0e3e3b8fe124f450280f/info is initiating minor compaction (all files)
2024-11-20T20:36:54,254 INFO [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 432451b2c58c0e3e3b8fe124f450280f/info in TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
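The selection just logged took the three 12.2 K flush files (12509 bytes each, 37527 bytes total) as a single candidate with "1 in ratio". The heart of that eligibility test can be sketched as follows; the 1.2 ratio is the usual default for hbase.hstore.compaction.ratio and is assumed here, and the real ExploringCompactionPolicy additionally enforces file-count and size bounds that this sketch omits:

    import java.util.List;

    public class CompactionSelectionSketch {
      // A candidate set is "in ratio" when no single file dominates it: every
      // file is at most ratio times the combined size of the other files.
      static boolean inRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        return sizes.stream().allMatch(s -> s <= ratio * (total - s));
      }

      public static void main(String[] args) {
        List<Long> flushFiles = List.of(12_509L, 12_509L, 12_509L); // sizes from the log, total 37527
        System.out.println("candidate in ratio: " + inRatio(flushFiles, 1.2));
      }
    }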
2024-11-20T20:36:54,254 INFO [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/9e0ee85716294d99bc4afc832906038f, hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/14469cef47454f45a6222a7e7a0384c2, hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f39b23eaff2c4f6fa039f2ba3e96035c] into tmpdir=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp, totalSize=36.6 K
2024-11-20T20:36:54,255 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9e0ee85716294d99bc4afc832906038f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732134951831
2024-11-20T20:36:54,256 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] compactions.Compactor(225): Compacting 14469cef47454f45a6222a7e7a0384c2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732134965872
2024-11-20T20:36:54,257 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] compactions.Compactor(225): Compacting f39b23eaff2c4f6fa039f2ba3e96035c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732134980924
2024-11-20T20:36:54,258 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,258 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,259 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,259 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,259 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,259 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732135009203 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732135014243
2024-11-20T20:36:54,260 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40601:40601),(127.0.0.1/127.0.0.1:36999:36999)]
2024-11-20T20:36:54,260 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732135009203 is not closed yet, will try archiving it next time
2024-11-20T20:36:54,261 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41209%2C1732134937306.1732135014260
2024-11-20T20:36:54,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741845_1021 (size=93)
2024-11-20T20:36:54,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741845_1021 (size=93)
2024-11-20T20:36:54,264 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732135009203 to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs/d7d59561a64e%2C41209%2C1732134937306.1732135009203
2024-11-20T20:36:54,269 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,269 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,270 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,270 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,270 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:36:54,270 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732135014243 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/WALs/d7d59561a64e,41209,1732134937306/d7d59561a64e%2C41209%2C1732134937306.1732135014260
2024-11-20T20:36:54,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741846_1022 (size=1258)
2024-11-20T20:36:54,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741846_1022 (size=1258)
2024-11-20T20:36:54,274 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40601:40601),(127.0.0.1/127.0.0.1:36999:36999)]
2024-11-20T20:36:54,290 INFO [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 432451b2c58c0e3e3b8fe124f450280f#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:36:54,291 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/f4d2cb3d96624100bda57c746568a35d is 1080, key is row0001/info:/1732134951831/Put/seqid=0
2024-11-20T20:36:54,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741848_1024 (size=27710)
2024-11-20T20:36:54,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741848_1024 (size=27710)
2024-11-20T20:36:54,308 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/f4d2cb3d96624100bda57c746568a35d as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f4d2cb3d96624100bda57c746568a35d
2024-11-20T20:36:54,323 INFO [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 432451b2c58c0e3e3b8fe124f450280f/info of 432451b2c58c0e3e3b8fe124f450280f into f4d2cb3d96624100bda57c746568a35d(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:36:54,323 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 432451b2c58c0e3e3b8fe124f450280f:
2024-11-20T20:36:54,325 INFO [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f., storeName=432451b2c58c0e3e3b8fe124f450280f/info, priority=13, startTime=1732135014242; duration=0sec
2024-11-20T20:36:54,325 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-11-20T20:36:54,325 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:36:54,325 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f4d2cb3d96624100bda57c746568a35d because midkey is the same as first or last row
2024-11-20T20:36:54,325 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-11-20T20:36:54,325 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:36:54,326 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f4d2cb3d96624100bda57c746568a35d because midkey is the same as first or last row
2024-11-20T20:36:54,326 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-11-20T20:36:54,326 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:36:54,326 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f4d2cb3d96624100bda57c746568a35d because midkey is the same as first or last row
2024-11-20T20:36:54,326 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:36:54,326 DEBUG [RS:0;d7d59561a64e:41209-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 432451b2c58c0e3e3b8fe124f450280f:info
2024-11-20T20:37:04,507 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-20T20:37:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41209 {}] regionserver.HRegion(8855): Flush requested on 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:37:06,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 432451b2c58c0e3e3b8fe124f450280f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T20:37:06,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/3badfa18bd944eaab4f7f67ca441db3a is 1080, key is row0022/info:/1732135014262/Put/seqid=0
2024-11-20T20:37:06,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741849_1025 (size=12509)
2024-11-20T20:37:06,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741849_1025 (size=12509)
2024-11-20T20:37:06,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/3badfa18bd944eaab4f7f67ca441db3a
2024-11-20T20:37:06,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/3badfa18bd944eaab4f7f67ca441db3a as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/3badfa18bd944eaab4f7f67ca441db3a
2024-11-20T20:37:06,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/3badfa18bd944eaab4f7f67ca441db3a, entries=7, sequenceid=42, filesize=12.2 K
2024-11-20T20:37:06,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 432451b2c58c0e3e3b8fe124f450280f in 36ms, sequenceid=42, compaction requested=false
2024-11-20T20:37:06,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 432451b2c58c0e3e3b8fe124f450280f:
2024-11-20T20:37:06,324 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K
2024-11-20T20:37:06,324 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:06,324 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f4d2cb3d96624100bda57c746568a35d because midkey is the same as first or last row
2024-11-20T20:37:12,235 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 432451b2c58c0e3e3b8fe124f450280f, had cached 0 bytes from a total of 40219
2024-11-20T20:37:14,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-20T20:37:14,327 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
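Every flush and compaction commit in this log follows the same two-phase pattern: the new HFile is written under the region's .tmp directory, then published by an atomic rename into the store directory ("Committing ... as ..."), so a reader never observes a half-written file. A filesystem-level sketch of the pattern against a local directory (the HDFS rename in the log behaves analogously; the file name and size below are copied from the flush above):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class FlushCommitSketch {
      public static void main(String[] args) throws Exception {
        Path region = Files.createTempDirectory("region");
        Path tmpDir = Files.createDirectories(region.resolve(".tmp/info"));
        Path storeDir = Files.createDirectories(region.resolve("info"));

        // Phase 1: write the flushed data under .tmp, invisible to readers.
        Path tmpFile = tmpDir.resolve("3badfa18bd944eaab4f7f67ca441db3a");
        Files.write(tmpFile, new byte[12_509]); // 12.2 K, matching the logged filesize

        // Phase 2: publish it with a single atomic move into the live store directory.
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("committed " + committed);
      }
    }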
2024-11-20T20:37:14,327 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
  at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
  at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
  at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
  at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
  at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:37:14,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:37:14,333 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:37:14,333 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-20T20:37:14,333 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-20T20:37:14,333 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1319998913, stopped=false
2024-11-20T20:37:14,334 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d7d59561a64e,39077,1732134935979
2024-11-20T20:37:14,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:37:14,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:37:14,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:14,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:14,408 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-20T20:37:14,409 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-20T20:37:14,410 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
  at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
  at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
  at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
  at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
  at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:37:14,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:37:14,410 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:37:14,410 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:37:14,411 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd7d59561a64e,41209,1732134937306' *****
2024-11-20T20:37:14,411 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-20T20:37:14,412 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-20T20:37:14,412 INFO [RS:0;d7d59561a64e:41209 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-20T20:37:14,412 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-20T20:37:14,413 INFO [RS:0;d7d59561a64e:41209 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-20T20:37:14,413 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(3091): Received CLOSE for 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:37:14,414 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(959): stopping server d7d59561a64e,41209,1732134937306
2024-11-20T20:37:14,414 INFO [RS:0;d7d59561a64e:41209 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-20T20:37:14,414 INFO [RS:0;d7d59561a64e:41209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d7d59561a64e:41209.
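Both call stacks bottom out in AbstractTestLogRolling.tearDown, which drives the entire shutdown sequence that follows through the public test utility. The shape of that JUnit hook, sketched with the class names visible in the stack traces (cluster sizing and the test body are omitted; this is an illustrative skeleton, not the actual test source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class LogRollingTeardownSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        util.startMiniCluster(); // master, region server, mini DFS and ZooKeeper
      }

      @Test
      public void writesSomething() throws Exception {
        // test body elided; the log above is what a real log-rolling test produced
      }

      @After
      public void tearDown() throws Exception {
        // Closes the shared connection, asks the master to shut the cluster down,
        // then stops HDFS/ZooKeeper: the sequence the two call stacks above show.
        util.shutdownMiniCluster();
      }
    }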
2024-11-20T20:37:14,414 DEBUG [RS:0;d7d59561a64e:41209 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:37:14,415 DEBUG [RS:0;d7d59561a64e:41209 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:37:14,415 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 432451b2c58c0e3e3b8fe124f450280f, disabling compactions & flushes
2024-11-20T20:37:14,415 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-20T20:37:14,415 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:37:14,415 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-20T20:37:14,415 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-20T20:37:14,415 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:37:14,416 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f. after waiting 0 ms
2024-11-20T20:37:14,416 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-20T20:37:14,416 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
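Earlier in the shutdown both servers received NodeDeleted for /hbase/running and immediately re-armed the watch ("Set watcher on znode that does not yet exist"). ZooKeeper watches are one-shot, so this re-arm-after-fire step is mandatory. A stripped-down sketch of that watch pattern using the plain ZooKeeper client API rather than HBase's ZKWatcher; only the quorum string and znode path are taken from the log, everything else is an assumption:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatch implements Watcher {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted
            && "/hbase/running".equals(event.getPath())) {
          // HBase servers treat deletion of /hbase/running as the
          // cluster-shutdown signal, as the NodeDeleted events above show.
          System.out.println("cluster shutdown signalled");
        }
      }

      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59326", 30_000, new RunningZNodeWatch());
        zk.exists("/hbase/running", true); // arms a one-shot watch even if the znode is absent
        Thread.currentThread().join();     // keep the session alive for the demo
      }
    }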
2024-11-20T20:37:14,416 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 432451b2c58c0e3e3b8fe124f450280f 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB
2024-11-20T20:37:14,416 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-20T20:37:14,416 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-20T20:37:14,417 DEBUG [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1325): Online Regions={432451b2c58c0e3e3b8fe124f450280f=TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f., 1588230740=hbase:meta,,1.1588230740}
2024-11-20T20:37:14,417 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-20T20:37:14,417 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-20T20:37:14,417 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-20T20:37:14,417 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-20T20:37:14,417 DEBUG [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 432451b2c58c0e3e3b8fe124f450280f
2024-11-20T20:37:14,417 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB
2024-11-20T20:37:14,422 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/0e7161e8a06b4ede98411567f337354c is 1080, key is row0029/info:/1732135028291/Put/seqid=0
2024-11-20T20:37:14,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741850_1026 (size=8193)
2024-11-20T20:37:14,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741850_1026 (size=8193)
2024-11-20T20:37:14,431 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/0e7161e8a06b4ede98411567f337354c
2024-11-20T20:37:14,439 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/info/33dedc55ec874122ab2d825457a48a72 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f./info:regioninfo/1732134942381/Put/seqid=0
2024-11-20T20:37:14,444 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/.tmp/info/0e7161e8a06b4ede98411567f337354c as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/0e7161e8a06b4ede98411567f337354c
2024-11-20T20:37:14,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741851_1027 (size=7016)
2024-11-20T20:37:14,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741851_1027 (size=7016)
2024-11-20T20:37:14,451 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/info/33dedc55ec874122ab2d825457a48a72
2024-11-20T20:37:14,455 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/0e7161e8a06b4ede98411567f337354c, entries=3, sequenceid=48, filesize=8.0 K
2024-11-20T20:37:14,457 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 432451b2c58c0e3e3b8fe124f450280f in 41ms, sequenceid=48, compaction requested=true
2024-11-20T20:37:14,458 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/9e0ee85716294d99bc4afc832906038f, hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/14469cef47454f45a6222a7e7a0384c2, hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f39b23eaff2c4f6fa039f2ba3e96035c] to archive
2024-11-20T20:37:14,461 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
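Note the two-step flush visible above: the HFile is written under the region's .tmp directory and only then "committed" by HRegionFileSystem(442) with a rename into the column-family directory, so readers never see a half-written store file. A generic local-filesystem sketch of that write-then-rename idiom (illustration only; HBase does the equivalent through the Hadoop FileSystem API on HDFS, and the file name below is just reused from the log):

    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class CommitByRename {
      public static void main(String[] args) throws Exception {
        // Local stand-ins for the HDFS layout in the log:
        // <region>/.tmp/<file> is renamed to <region>/<family>/<file>.
        Path tmp = Paths.get("region/.tmp/0e7161e8a06b4ede98411567f337354c");
        Path committed = Paths.get("region/info/0e7161e8a06b4ede98411567f337354c");
        Files.createDirectories(tmp.getParent());
        Files.createDirectories(committed.getParent());
        Files.write(tmp, "flushed-hfile-bytes".getBytes(StandardCharsets.UTF_8));
        // The rename is the "commit": atomic on one filesystem, so the file
        // appears in the family directory fully formed or not at all.
        Files.move(tmp, committed, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("committed " + committed);
      }
    }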
2024-11-20T20:37:14,464 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/9e0ee85716294d99bc4afc832906038f to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/archive/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/9e0ee85716294d99bc4afc832906038f
2024-11-20T20:37:14,467 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/14469cef47454f45a6222a7e7a0384c2 to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/archive/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/14469cef47454f45a6222a7e7a0384c2
2024-11-20T20:37:14,469 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f39b23eaff2c4f6fa039f2ba3e96035c to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/archive/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/info/f39b23eaff2c4f6fa039f2ba3e96035c
2024-11-20T20:37:14,476 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/ns/133822a6a0a04412a84a465e58580c72 is 43, key is default/ns:d/1732134941159/Put/seqid=0
2024-11-20T20:37:14,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741852_1028 (size=5153)
2024-11-20T20:37:14,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741852_1028 (size=5153)
2024-11-20T20:37:14,484 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/ns/133822a6a0a04412a84a465e58580c72
2024-11-20T20:37:14,485 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d7d59561a64e:39077 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-11-20T20:37:14,491 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [9e0ee85716294d99bc4afc832906038f=12509, 14469cef47454f45a6222a7e7a0384c2=12509, f39b23eaff2c4f6fa039f2ba3e96035c=12509]
2024-11-20T20:37:14,497 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/default/TestLogRolling-testSlowSyncLogRolling/432451b2c58c0e3e3b8fe124f450280f/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1
2024-11-20T20:37:14,499 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
2024-11-20T20:37:14,499 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 432451b2c58c0e3e3b8fe124f450280f:
Waiting for close lock at 1732135034414
Running coprocessor pre-close hooks at 1732135034415 (+1 ms)
Disabling compacts and flushes for region at 1732135034415
Disabling writes for close at 1732135034416 (+1 ms)
Obtaining lock to block concurrent updates at 1732135034416
Preparing flush snapshotting stores in 432451b2c58c0e3e3b8fe124f450280f at 1732135034416
Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732135034417 (+1 ms)
Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f. at 1732135034418 (+1 ms)
Flushing 432451b2c58c0e3e3b8fe124f450280f/info: creating writer at 1732135034418
Flushing 432451b2c58c0e3e3b8fe124f450280f/info: appending metadata at 1732135034422 (+4 ms)
Flushing 432451b2c58c0e3e3b8fe124f450280f/info: closing flushed file at 1732135034422
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1130ef3a: reopening flushed file at 1732135034442 (+20 ms)
Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 432451b2c58c0e3e3b8fe124f450280f in 41ms, sequenceid=48, compaction requested=true at 1732135034457 (+15 ms)
Writing region close event to WAL at 1732135034492 (+35 ms)
Running coprocessor post-close hooks at 1732135034497 (+5 ms)
Closed at 1732135034499 (+2 ms)
2024-11-20T20:37:14,500 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732134941650.432451b2c58c0e3e3b8fe124f450280f.
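The region close journal above records every step with its timestamp and a "(+N ms)" delta from the previous step; the two biggest deltas here are the 20 ms reopen of the flushed file and the 35 ms gap before the close event reached the WAL. A hypothetical helper, not part of HBase, that tabulates those deltas from a journal string passed on the command line:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CloseJournalDeltas {
      // Matches "<step> at <13-digit epoch millis>" optionally followed by " (+N ms)".
      private static final Pattern STEP =
          Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

      public static void main(String[] args) {
        String journal = String.join(" ", args);
        Matcher m = STEP.matcher(journal);
        while (m.find()) {
          long delta = m.group(3) == null ? 0 : Long.parseLong(m.group(3));
          // Print the delta first so the slow steps stand out when sorted.
          System.out.printf("%5d ms  %s%n", delta, m.group(1).trim());
        }
      }
    }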
2024-11-20T20:37:14,510 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/table/656748798c434826a14d24f908b80042 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732134942448/Put/seqid=0
2024-11-20T20:37:14,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741853_1029 (size=5396)
2024-11-20T20:37:14,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741853_1029 (size=5396)
2024-11-20T20:37:14,517 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/table/656748798c434826a14d24f908b80042
2024-11-20T20:37:14,526 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/info/33dedc55ec874122ab2d825457a48a72 as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/info/33dedc55ec874122ab2d825457a48a72
2024-11-20T20:37:14,536 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/info/33dedc55ec874122ab2d825457a48a72, entries=10, sequenceid=11, filesize=6.9 K
2024-11-20T20:37:14,538 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/ns/133822a6a0a04412a84a465e58580c72 as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/ns/133822a6a0a04412a84a465e58580c72
2024-11-20T20:37:14,545 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/ns/133822a6a0a04412a84a465e58580c72, entries=2, sequenceid=11, filesize=5.0 K
2024-11-20T20:37:14,546 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/.tmp/table/656748798c434826a14d24f908b80042 as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/table/656748798c434826a14d24f908b80042
2024-11-20T20:37:14,554 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/table/656748798c434826a14d24f908b80042, entries=2, sequenceid=11, filesize=5.3 K
2024-11-20T20:37:14,556 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false
2024-11-20T20:37:14,561 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-20T20:37:14,562 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:37:14,562 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-20T20:37:14,562 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
Waiting for close lock at 1732135034416
Running coprocessor pre-close hooks at 1732135034416
Disabling compacts and flushes for region at 1732135034416
Disabling writes for close at 1732135034417 (+1 ms)
Obtaining lock to block concurrent updates at 1732135034417
Preparing flush snapshotting stores in 1588230740 at 1732135034417
Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732135034418 (+1 ms)
Flushing stores of hbase:meta,,1.1588230740 at 1732135034419 (+1 ms)
Flushing 1588230740/info: creating writer at 1732135034419
Flushing 1588230740/info: appending metadata at 1732135034439 (+20 ms)
Flushing 1588230740/info: closing flushed file at 1732135034439
Flushing 1588230740/ns: creating writer at 1732135034459 (+20 ms)
Flushing 1588230740/ns: appending metadata at 1732135034476 (+17 ms)
Flushing 1588230740/ns: closing flushed file at 1732135034476
Flushing 1588230740/table: creating writer at 1732135034494 (+18 ms)
Flushing 1588230740/table: appending metadata at 1732135034509 (+15 ms)
Flushing 1588230740/table: closing flushed file at 1732135034510 (+1 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c25d3a4: reopening flushed file at 1732135034525 (+15 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@623433fc: reopening flushed file at 1732135034536 (+11 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bcf7d73: reopening flushed file at 1732135034545 (+9 ms)
Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1732135034556 (+11 ms)
Writing region close event to WAL at 1732135034557 (+1 ms)
Running coprocessor post-close hooks at 1732135034562 (+5 ms)
Closed at 1732135034562
2024-11-20T20:37:14,562 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-20T20:37:14,617 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(976): stopping server d7d59561a64e,41209,1732134937306; all regions closed.
2024-11-20T20:37:14,620 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,621 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,621 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,621 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,622 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741834_1010 (size=3066)
2024-11-20T20:37:14,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741834_1010 (size=3066)
2024-11-20T20:37:14,630 DEBUG [RS:0;d7d59561a64e:41209 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs
2024-11-20T20:37:14,631 INFO [RS:0;d7d59561a64e:41209 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C41209%2C1732134937306.meta:.meta(num 1732134940688)
2024-11-20T20:37:14,631 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,631 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,631 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,632 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,632 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741847_1023 (size=12695)
2024-11-20T20:37:14,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741847_1023 (size=12695)
2024-11-20T20:37:14,638 DEBUG [RS:0;d7d59561a64e:41209 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/oldWALs
2024-11-20T20:37:14,638 INFO [RS:0;d7d59561a64e:41209 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C41209%2C1732134937306:(num 1732135014260)
2024-11-20T20:37:14,638 DEBUG [RS:0;d7d59561a64e:41209 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:37:14,638 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.LeaseManager(133): Closed leases
2024-11-20T20:37:14,638 INFO [RS:0;d7d59561a64e:41209 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-20T20:37:14,638 INFO [RS:0;d7d59561a64e:41209 {}] hbase.ChoreService(370): Chore service for: regionserver/d7d59561a64e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-20T20:37:14,639 INFO [RS:0;d7d59561a64e:41209 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-20T20:37:14,639 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
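The WAL names above embed the regionserver's ServerName with percent-encoded commas, so the prefix "d7d59561a64e%2C41209%2C1732134937306" is host, port and start code, and the "(num ...)" suffix is the roll timestamp. Decoding the prefix needs nothing beyond the standard library:

    import java.net.URLDecoder;
    import java.nio.charset.StandardCharsets;

    public class WalNameDecode {
      public static void main(String[] args) {
        // Prefix of the WAL just closed above; %2C is a percent-encoded comma.
        String encoded = "d7d59561a64e%2C41209%2C1732134937306";
        String[] parts = URLDecoder.decode(encoded, StandardCharsets.UTF_8).split(",");
        System.out.println("host=" + parts[0]
            + " port=" + parts[1]
            + " startcode=" + parts[2]);
      }
    }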
2024-11-20T20:37:14,639 INFO [RS:0;d7d59561a64e:41209 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41209
2024-11-20T20:37:14,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d7d59561a64e,41209,1732134937306
2024-11-20T20:37:14,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T20:37:14,650 INFO [RS:0;d7d59561a64e:41209 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-20T20:37:14,662 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d7d59561a64e,41209,1732134937306]
2024-11-20T20:37:14,672 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d7d59561a64e,41209,1732134937306 already deleted, retry=false
2024-11-20T20:37:14,672 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d7d59561a64e,41209,1732134937306 expired; onlineServers=0
2024-11-20T20:37:14,672 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd7d59561a64e,39077,1732134935979' *****
2024-11-20T20:37:14,672 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-20T20:37:14,673 INFO [M:0;d7d59561a64e:39077 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-20T20:37:14,673 INFO [M:0;d7d59561a64e:39077 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-20T20:37:14,673 DEBUG [M:0;d7d59561a64e:39077 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-20T20:37:14,673 DEBUG [M:0;d7d59561a64e:39077 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-20T20:37:14,673 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
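The /hbase/rs entry for the regionserver is an ephemeral znode, so it disappears as soon as the server's ZooKeeper session closes; that deletion is the NodeDeleted event the master's RegionServerTracker reacts to above. A self-contained illustration of the ephemeral lifecycle with the plain ZooKeeper API (the connection string and znode path here are placeholders, not values from this cluster):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralNodeDemo {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 15_000, event -> { });
        // EPHEMERAL znodes are bound to this session.
        zk.create("/demo-rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.EPHEMERAL);
        // Closing the session makes the server delete /demo-rs, which fires
        // NodeDeleted for any watcher, just like the expiration processing above.
        zk.close();
      }
    }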
2024-11-20T20:37:14,674 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732134939787 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732134939787,5,FailOnTimeoutGroup]
2024-11-20T20:37:14,674 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732134939781 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732134939781,5,FailOnTimeoutGroup]
2024-11-20T20:37:14,674 INFO [M:0;d7d59561a64e:39077 {}] hbase.ChoreService(370): Chore service for: master/d7d59561a64e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-20T20:37:14,674 INFO [M:0;d7d59561a64e:39077 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-20T20:37:14,675 DEBUG [M:0;d7d59561a64e:39077 {}] master.HMaster(1795): Stopping service threads
2024-11-20T20:37:14,675 INFO [M:0;d7d59561a64e:39077 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-20T20:37:14,675 INFO [M:0;d7d59561a64e:39077 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-20T20:37:14,676 INFO [M:0;d7d59561a64e:39077 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-20T20:37:14,676 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-20T20:37:14,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-20T20:37:14,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:14,683 DEBUG [M:0;d7d59561a64e:39077 {}] zookeeper.ZKUtil(347): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-20T20:37:14,683 WARN [M:0;d7d59561a64e:39077 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-20T20:37:14,685 INFO [M:0;d7d59561a64e:39077 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/.lastflushedseqids
2024-11-20T20:37:14,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741854_1030 (size=130)
2024-11-20T20:37:14,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741854_1030 (size=130)
2024-11-20T20:37:14,703 INFO [M:0;d7d59561a64e:39077 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-20T20:37:14,703 INFO [M:0;d7d59561a64e:39077 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-20T20:37:14,703 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T20:37:14,703 INFO [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:37:14,703 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:37:14,703 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T20:37:14,703 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:37:14,704 INFO [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB
2024-11-20T20:37:14,721 DEBUG [M:0;d7d59561a64e:39077 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d87407fc47a441f38f146a610c66286e is 82, key is hbase:meta,,1/info:regioninfo/1732134940813/Put/seqid=0
2024-11-20T20:37:14,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741855_1031 (size=5672)
2024-11-20T20:37:14,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741855_1031 (size=5672)
2024-11-20T20:37:14,728 INFO [M:0;d7d59561a64e:39077 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d87407fc47a441f38f146a610c66286e
2024-11-20T20:37:14,750 DEBUG [M:0;d7d59561a64e:39077 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/baae50f227b94e1bb9371be9934c0b6a is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732134942469/Put/seqid=0
2024-11-20T20:37:14,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741856_1032 (size=6247)
2024-11-20T20:37:14,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741856_1032 (size=6247)
2024-11-20T20:37:14,756 INFO [M:0;d7d59561a64e:39077 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/baae50f227b94e1bb9371be9934c0b6a
2024-11-20T20:37:14,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T20:37:14,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41209-0x10159f5945f0001, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T20:37:14,762 INFO [M:0;d7d59561a64e:39077 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for baae50f227b94e1bb9371be9934c0b6a
2024-11-20T20:37:14,762 INFO [RS:0;d7d59561a64e:41209 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-20T20:37:14,762 INFO [RS:0;d7d59561a64e:41209 {}] regionserver.HRegionServer(1031): Exiting; stopping=d7d59561a64e,41209,1732134937306; zookeeper connection closed.
2024-11-20T20:37:14,763 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@447c3017 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@447c3017
2024-11-20T20:37:14,763 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-20T20:37:14,777 DEBUG [M:0;d7d59561a64e:39077 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b852c18edd45492192db2d22d8959a50 is 69, key is d7d59561a64e,41209,1732134937306/rs:state/1732134939864/Put/seqid=0
2024-11-20T20:37:14,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741857_1033 (size=5156)
2024-11-20T20:37:14,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741857_1033 (size=5156)
2024-11-20T20:37:14,784 INFO [M:0;d7d59561a64e:39077 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b852c18edd45492192db2d22d8959a50
2024-11-20T20:37:14,813 DEBUG [M:0;d7d59561a64e:39077 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/092fd4ae5ae5495291effee2e0594380 is 52, key is load_balancer_on/state:d/1732134941599/Put/seqid=0
2024-11-20T20:37:14,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741858_1034 (size=5056)
2024-11-20T20:37:14,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741858_1034 (size=5056)
2024-11-20T20:37:14,820 INFO [M:0;d7d59561a64e:39077 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/092fd4ae5ae5495291effee2e0594380
2024-11-20T20:37:14,830 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d87407fc47a441f38f146a610c66286e as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d87407fc47a441f38f146a610c66286e
2024-11-20T20:37:14,838 INFO [M:0;d7d59561a64e:39077 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d87407fc47a441f38f146a610c66286e, entries=8, sequenceid=59, filesize=5.5 K
2024-11-20T20:37:14,839 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/baae50f227b94e1bb9371be9934c0b6a as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/baae50f227b94e1bb9371be9934c0b6a
2024-11-20T20:37:14,847 INFO [M:0;d7d59561a64e:39077 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for baae50f227b94e1bb9371be9934c0b6a
2024-11-20T20:37:14,847 INFO [M:0;d7d59561a64e:39077 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/baae50f227b94e1bb9371be9934c0b6a, entries=6, sequenceid=59, filesize=6.1 K
2024-11-20T20:37:14,848 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b852c18edd45492192db2d22d8959a50 as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b852c18edd45492192db2d22d8959a50
2024-11-20T20:37:14,857 INFO [M:0;d7d59561a64e:39077 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b852c18edd45492192db2d22d8959a50, entries=1, sequenceid=59, filesize=5.0 K
2024-11-20T20:37:14,859 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/092fd4ae5ae5495291effee2e0594380 as hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/092fd4ae5ae5495291effee2e0594380
2024-11-20T20:37:14,868 INFO [M:0;d7d59561a64e:39077 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/092fd4ae5ae5495291effee2e0594380, entries=1, sequenceid=59, filesize=4.9 K
2024-11-20T20:37:14,870 INFO [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=59, compaction requested=false
2024-11-20T20:37:14,872 INFO [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:37:14,873 DEBUG [M:0;d7d59561a64e:39077 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
Waiting for close lock at 1732135034703
Disabling compacts and flushes for region at 1732135034703
Disabling writes for close at 1732135034703
Obtaining lock to block concurrent updates at 1732135034704 (+1 ms)
Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732135034704
Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732135034704
Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732135034705 (+1 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732135034705
Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732135034720 (+15 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732135034720
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732135034734 (+14 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732135034749 (+15 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732135034749
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732135034762 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732135034777 (+15 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732135034777
Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732135034791 (+14 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732135034812 (+21 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732135034812
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62a18a94: reopening flushed file at 1732135034828 (+16 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36527d4: reopening flushed file at 1732135034838 (+10 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@209a8d43: reopening flushed file at 1732135034847 (+9 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a522e3d: reopening flushed file at 1732135034857 (+10 ms)
Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=59, compaction requested=false at 1732135034870 (+13 ms)
Writing region close event to WAL at 1732135034872 (+2 ms)
Closed at 1732135034872
2024-11-20T20:37:14,876 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,876 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,877 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,877 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,877 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:14,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741830_1006 (size=27973)
2024-11-20T20:37:14,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46327 is added to blk_1073741830_1006 (size=27973)
2024-11-20T20:37:14,883 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-20T20:37:14,883 INFO [M:0;d7d59561a64e:39077 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-20T20:37:14,883 INFO [M:0;d7d59561a64e:39077 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39077
2024-11-20T20:37:14,883 INFO [M:0;d7d59561a64e:39077 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-20T20:37:15,035 INFO [M:0;d7d59561a64e:39077 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-20T20:37:15,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T20:37:15,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39077-0x10159f5945f0000, quorum=127.0.0.1:59326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T20:37:15,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c963ecd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:15,042 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:37:15,042 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:37:15,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:37:15,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.log.dir/,STOPPED}
2024-11-20T20:37:15,045 WARN [BP-1586463130-172.17.0.2-1732134929023 heartbeating to localhost/127.0.0.1:42437 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:37:15,045 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:37:15,045 WARN [BP-1586463130-172.17.0.2-1732134929023 heartbeating to localhost/127.0.0.1:42437 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1586463130-172.17.0.2-1732134929023 (Datanode Uuid 22152500-c199-4d70-afdd-965b92c17c61) service to localhost/127.0.0.1:42437
2024-11-20T20:37:15,045 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:37:15,046 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/data/data3/current/BP-1586463130-172.17.0.2-1732134929023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:15,046 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/data/data4/current/BP-1586463130-172.17.0.2-1732134929023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:15,047 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:37:15,049 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f93babe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:15,049 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:37:15,049 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:37:15,049 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:37:15,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.log.dir/,STOPPED}
2024-11-20T20:37:15,051 WARN [BP-1586463130-172.17.0.2-1732134929023 heartbeating to localhost/127.0.0.1:42437 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:37:15,051 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:37:15,051 WARN [BP-1586463130-172.17.0.2-1732134929023 heartbeating to localhost/127.0.0.1:42437 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1586463130-172.17.0.2-1732134929023 (Datanode Uuid dc5b1e8b-d666-49ce-a9f2-894fc7b0d778) service to localhost/127.0.0.1:42437 2024-11-20T20:37:15,051 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T20:37:15,051 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/data/data1/current/BP-1586463130-172.17.0.2-1732134929023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:37:15,052 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/cluster_b9a71e61-fd07-5b7b-2c21-0ad8e2ea8251/data/data2/current/BP-1586463130-172.17.0.2-1732134929023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:37:15,052 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:37:15,064 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de997b9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T20:37:15,065 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:37:15,065 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:37:15,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:37:15,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.log.dir/,STOPPED} 2024-11-20T20:37:15,074 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T20:37:15,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T20:37:15,115 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=82 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Async-Client-Retry-Timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-3-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-1-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42437
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: weak-ref-cleaner-strictcontextstorage
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Timer for 'HBase' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: Monitor thread for TaskMonitor
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HBase-Metrics2-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RpcClient-timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42437
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:42437 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: SnapshotHandlerChoreCleaner
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-3-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: regionserver/d7d59561a64e:0.procedureResultReporter
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Potentially hanging thread: ForkJoinPool-2-worker-5
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: region-location-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42437
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-2-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3d82ef6c
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-4-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:42437
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-1-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-5-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:42437 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-2-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-1-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:42437 from jenkins.hfs.0
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: ForkJoinPool-2-worker-8
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: ForkJoinPool-2-worker-6
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: ForkJoinPool-2-worker-3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: regionserver/d7d59561a64e:0.leaseChecker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82)
Potentially hanging thread: nioEventLoopGroup-2-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: master/d7d59561a64e:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: master/d7d59561a64e:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: SessionTracker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: ForkJoinPool-2-worker-2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:42437
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: SSL Certificates Store Monitor
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: nioEventLoopGroup-3-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=450 (was 901), ProcessCount=11 (was 11), AvailableMemoryMB=2020 (was 2476)
2024-11-20T20:37:15,121 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=83, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=450, ProcessCount=11, AvailableMemoryMB=2019
2024-11-20T20:37:15,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T20:37:15,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.log.dir so I do NOT create it in target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251
2024-11-20T20:37:15,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d792dff5-1466-b460-6572-a45cd1d6842c/hadoop.tmp.dir so I do NOT create it in target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251
2024-11-20T20:37:15,122 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784, deleteOnExit=true
2024-11-20T20:37:15,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-20T20:37:15,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/test.cache.data in system properties and HBase conf
2024-11-20T20:37:15,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.tmp.dir in system properties and HBase conf
2024-11-20T20:37:15,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.log.dir in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-20T20:37:15,123 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-20T20:37:15,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:37:15,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:37:15,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-20T20:37:15,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/nfs.dump.dir in system properties and HBase conf
2024-11-20T20:37:15,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/java.io.tmpdir in system properties and HBase conf
2024-11-20T20:37:15,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:37:15,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-20T20:37:15,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-20T20:37:15,138 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:37:15,495 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:37:15,502 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:37:15,504 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:37:15,504 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:37:15,504 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:37:15,505 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T20:37:15,505 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5686c999{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.log.dir/,AVAILABLE} 2024-11-20T20:37:15,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69fa0c88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T20:37:15,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5bc93f4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/java.io.tmpdir/jetty-localhost-35641-hadoop-hdfs-3_4_1-tests_jar-_-any-10347441001144553115/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T20:37:15,604 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ec80534{HTTP/1.1, (http/1.1)}{localhost:35641} 2024-11-20T20:37:15,604 INFO [Time-limited test {}] server.Server(415): Started @109310ms 2024-11-20T20:37:15,616 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T20:37:15,868 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T20:37:15,873 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T20:37:15,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T20:37:15,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T20:37:15,874 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T20:37:15,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5357e169{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.log.dir/,AVAILABLE} 2024-11-20T20:37:15,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39fb5954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T20:37:15,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77ff58b7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/java.io.tmpdir/jetty-localhost-37165-hadoop-hdfs-3_4_1-tests_jar-_-any-2504880155349458539/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:37:15,972 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9d8d6d6{HTTP/1.1, (http/1.1)}{localhost:37165} 2024-11-20T20:37:15,972 INFO [Time-limited test {}] server.Server(415): Started @109679ms 2024-11-20T20:37:15,974 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T20:37:16,006 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T20:37:16,009 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T20:37:16,010 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T20:37:16,010 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T20:37:16,010 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T20:37:16,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7130e643{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.log.dir/,AVAILABLE} 2024-11-20T20:37:16,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a17cc25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T20:37:16,036 INFO [regionserver/d7d59561a64e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:37:16,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a329c51{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/java.io.tmpdir/jetty-localhost-35951-hadoop-hdfs-3_4_1-tests_jar-_-any-14060038643270544762/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:37:16,110 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d8729f1{HTTP/1.1, (http/1.1)}{localhost:35951} 2024-11-20T20:37:16,110 INFO [Time-limited test {}] server.Server(415): Started @109816ms 2024-11-20T20:37:16,112 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
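Up to this point the log records HBaseTestingUtil redirecting every dfs.*, yarn.*, nfs.dump.dir, and java.io.tmpdir path into the per-run test-data directory, then bringing up the embedded HDFS web endpoints (one Jetty server for the NameNode web app, one per DataNode). For orientation, here is a minimal sketch of the test-side call that drives this whole sequence, assuming HBase branch-3's hbase-testing-util artifact on the classpath (the class name `org.apache.hadoop.hbase.HBaseTestingUtil` is taken straight from the log):

```java
// Minimal sketch, not the test's actual source: startMiniCluster() performs the
// steps logged above -- redirect dfs.*/yarn.*/tmp dirs under test-data, start a
// MiniDFSCluster (NameNode + DataNodes behind Jetty), a MiniZooKeeperCluster,
// and an HBase master plus region server.
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();
    try {
      // ... exercise the cluster under test ...
    } finally {
      util.shutdownMiniCluster();  // tears everything down in reverse order
    }
  }
}
```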
2024-11-20T20:37:16,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T20:37:16,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T20:37:16,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T20:37:16,489 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-20T20:37:17,797 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/data/data1/current/BP-1578448213-172.17.0.2-1732135035150/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:17,797 WARN [Thread-455 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/data/data2/current/BP-1578448213-172.17.0.2-1732135035150/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:17,813 WARN [Thread-418 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:37:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab7af7c8c38be906 with lease ID 0x9ee61e09ca317f5f: Processing first storage report for DS-11b8d9e3-5a9b-4c62-95e8-507e54548c71 from datanode DatanodeRegistration(127.0.0.1:38373, datanodeUuid=1af8496b-6fd3-42af-8fee-66f549a04cdb, infoPort=35079, infoSecurePort=0, ipcPort=33117, storageInfo=lv=-57;cid=testClusterID;nsid=1183429056;c=1732135035150) 2024-11-20T20:37:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab7af7c8c38be906 with lease ID 0x9ee61e09ca317f5f: from storage DS-11b8d9e3-5a9b-4c62-95e8-507e54548c71 node DatanodeRegistration(127.0.0.1:38373, datanodeUuid=1af8496b-6fd3-42af-8fee-66f549a04cdb, infoPort=35079, infoSecurePort=0, ipcPort=33117, storageInfo=lv=-57;cid=testClusterID;nsid=1183429056;c=1732135035150), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:37:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab7af7c8c38be906 with lease ID 0x9ee61e09ca317f5f: Processing first storage report for DS-2b68de44-b7df-4252-a00d-b366d9080bf1 from datanode DatanodeRegistration(127.0.0.1:38373, datanodeUuid=1af8496b-6fd3-42af-8fee-66f549a04cdb, infoPort=35079, infoSecurePort=0, ipcPort=33117, storageInfo=lv=-57;cid=testClusterID;nsid=1183429056;c=1732135035150) 2024-11-20T20:37:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab7af7c8c38be906 with lease ID 0x9ee61e09ca317f5f: from storage DS-2b68de44-b7df-4252-a00d-b366d9080bf1 node DatanodeRegistration(127.0.0.1:38373, datanodeUuid=1af8496b-6fd3-42af-8fee-66f549a04cdb, infoPort=35079, infoSecurePort=0, ipcPort=33117, storageInfo=lv=-57;cid=testClusterID;nsid=1183429056;c=1732135035150), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:37:17,929 WARN [Thread-466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/data/data4/current/BP-1578448213-172.17.0.2-1732135035150/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:17,929 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/data/data3/current/BP-1578448213-172.17.0.2-1732135035150/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:17,950 WARN [Thread-441 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:37:17,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc1842473c53e531a with lease ID 0x9ee61e09ca317f60: Processing first storage report for DS-5799ced3-1aba-4aea-a699-6bff088dc35c from datanode DatanodeRegistration(127.0.0.1:37597, datanodeUuid=b70d4a19-66aa-443f-b659-047836a92a85, infoPort=45589, infoSecurePort=0, ipcPort=32775, storageInfo=lv=-57;cid=testClusterID;nsid=1183429056;c=1732135035150) 2024-11-20T20:37:17,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1842473c53e531a with lease ID 0x9ee61e09ca317f60: from storage DS-5799ced3-1aba-4aea-a699-6bff088dc35c node DatanodeRegistration(127.0.0.1:37597, datanodeUuid=b70d4a19-66aa-443f-b659-047836a92a85, infoPort=45589, infoSecurePort=0, ipcPort=32775, storageInfo=lv=-57;cid=testClusterID;nsid=1183429056;c=1732135035150), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T20:37:17,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc1842473c53e531a with lease ID 0x9ee61e09ca317f60: Processing first storage report for DS-747ae692-8d67-4a04-868f-06f37abcbd07 from datanode DatanodeRegistration(127.0.0.1:37597, datanodeUuid=b70d4a19-66aa-443f-b659-047836a92a85, infoPort=45589, infoSecurePort=0, ipcPort=32775, storageInfo=lv=-57;cid=testClusterID;nsid=1183429056;c=1732135035150) 2024-11-20T20:37:17,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1842473c53e531a with lease ID 0x9ee61e09ca317f60: from storage DS-747ae692-8d67-4a04-868f-06f37abcbd07 node DatanodeRegistration(127.0.0.1:37597, datanodeUuid=b70d4a19-66aa-443f-b659-047836a92a85, infoPort=45589, infoSecurePort=0, ipcPort=32775, storageInfo=lv=-57;cid=testClusterID;nsid=1183429056;c=1732135035150), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:37:18,057 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251 2024-11-20T20:37:18,060 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/zookeeper_0, clientPort=58745, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T20:37:18,062 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58745 2024-11-20T20:37:18,062 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:37:18,064 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:37:18,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741825_1001 (size=7) 2024-11-20T20:37:18,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741825_1001 (size=7) 2024-11-20T20:37:18,076 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7 with version=8 2024-11-20T20:37:18,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase-staging 2024-11-20T20:37:18,079 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d7d59561a64e:0 server-side Connection retries=45 2024-11-20T20:37:18,079 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:37:18,079 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T20:37:18,079 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T20:37:18,079 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:37:18,079 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T20:37:18,079 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T20:37:18,079 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T20:37:18,080 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37237 2024-11-20T20:37:18,082 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37237 connecting to ZooKeeper ensemble=127.0.0.1:58745 2024-11-20T20:37:18,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:372370x0, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T20:37:18,148 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37237-0x10159f728160000 connected 2024-11-20T20:37:18,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:37:18,233 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:37:18,236 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:37:18,236 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7, hbase.cluster.distributed=false 2024-11-20T20:37:18,238 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T20:37:18,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37237 2024-11-20T20:37:18,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37237 2024-11-20T20:37:18,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37237 2024-11-20T20:37:18,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37237 2024-11-20T20:37:18,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37237 2024-11-20T20:37:18,259 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45 2024-11-20T20:37:18,259 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:37:18,260 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T20:37:18,260 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T20:37:18,260 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:37:18,260 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T20:37:18,260 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T20:37:18,260 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T20:37:18,261 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42007 2024-11-20T20:37:18,262 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42007 connecting to ZooKeeper ensemble=127.0.0.1:58745 2024-11-20T20:37:18,263 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:37:18,265 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:37:18,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:420070x0, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T20:37:18,272 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42007-0x10159f728160001 connected 2024-11-20T20:37:18,272 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:37:18,272 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T20:37:18,274 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T20:37:18,275 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T20:37:18,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T20:37:18,278 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42007 2024-11-20T20:37:18,278 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42007 2024-11-20T20:37:18,279 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42007 2024-11-20T20:37:18,279 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42007 2024-11-20T20:37:18,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42007 2024-11-20T20:37:18,293 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d7d59561a64e:37237 2024-11-20T20:37:18,293 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d7d59561a64e,37237,1732135038078 2024-11-20T20:37:18,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:37:18,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:37:18,304 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d7d59561a64e,37237,1732135038078 2024-11-20T20:37:18,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:18,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T20:37:18,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:18,314 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T20:37:18,315 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d7d59561a64e,37237,1732135038078 from backup master directory 2024-11-20T20:37:18,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d7d59561a64e,37237,1732135038078 2024-11-20T20:37:18,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:37:18,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:37:18,324 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
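The repeated "Set watcher on znode that does not yet exist" lines above rely on a ZooKeeper property worth spelling out: exists() registers a watch even when the node is absent, so a later NodeCreated event (as happens for /hbase/running further down) still fires. An illustrative sketch at the raw ZooKeeper API level, not HBase's own code; the connect string comes from the log's ensemble=127.0.0.1:58745, the session timeout is an assumption:

```java
// Sketch: watch a znode that may not exist yet. exists() sets the watch either way.
import org.apache.zookeeper.*;

public class ExistsWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58745", 30000, ev -> {});
    zk.exists("/hbase/running", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("cluster marked as running: " + event.getPath());
      }
    });
    // The watch fires once the active master creates /hbase/running (see below).
  }
}
```

The HBASE_ZNODE_FILE warning just above is a separate, environmental note: without that variable the start scripts cannot clear this master's znode after a crash, lengthening recovery.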
2024-11-20T20:37:18,324 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d7d59561a64e,37237,1732135038078 2024-11-20T20:37:18,330 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/hbase.id] with ID: 155795b7-0f3d-4e68-b70a-06c6a6a5f04a 2024-11-20T20:37:18,330 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/.tmp/hbase.id 2024-11-20T20:37:18,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741826_1002 (size=42) 2024-11-20T20:37:18,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741826_1002 (size=42) 2024-11-20T20:37:18,340 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/.tmp/hbase.id]:[hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/hbase.id] 2024-11-20T20:37:18,358 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:37:18,358 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T20:37:18,361 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
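The two FSUtils lines above show the cluster ID being published with a write-to-temp-then-rename pattern: the ID is written under .tmp and only then moved to hbase.id, so readers never observe a partially written file. A hedged sketch of that pattern with the public Hadoop FileSystem API (paths and the UUID are copied from the log; this is an illustration, not FSUtils' implementation):

```java
// Sketch of temp-write-then-rename publication; rename is atomic within one
// HDFS namespace, which is what makes the final file appear all-at-once.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40197");
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("155795b7-0f3d-4e68-b70a-06c6a6a5f04a".getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("could not publish " + dst);
    }
  }
}
```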
2024-11-20T20:37:18,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:18,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:18,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741827_1003 (size=196) 2024-11-20T20:37:18,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741827_1003 (size=196) 2024-11-20T20:37:18,379 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T20:37:18,380 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T20:37:18,380 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:37:18,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741828_1004 (size=1189) 2024-11-20T20:37:18,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741828_1004 (size=1189) 2024-11-20T20:37:18,391 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store 2024-11-20T20:37:18,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741829_1005 (size=34) 2024-11-20T20:37:18,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741829_1005 (size=34) 2024-11-20T20:37:18,402 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:37:18,402 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T20:37:18,402 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:37:18,402 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:37:18,402 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T20:37:18,402 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:37:18,402 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
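The long attribute dump above is easier to read when mapped back onto the client API. A hedged sketch rebuilding a subset of the logged 'master:store' descriptor (only the 'info' and 'proc' families are shown; 'rs' and 'state' follow the same pattern as 'proc'); attribute values are copied from the log, the builder calls are standard hbase-client API:

```java
// Sketch: the printed TABLE_ATTRIBUTES/column-family dump, expressed as builders.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    var info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                   // VERSIONS => '3'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
        .build();
    var proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(65536)                                  // BLOCKSIZE => '65536 B (64KB)'
        .build();
    var table = TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
    System.out.println(table);
  }
}
```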
2024-11-20T20:37:18,403 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135038402Disabling compacts and flushes for region at 1732135038402Disabling writes for close at 1732135038402Writing region close event to WAL at 1732135038402Closed at 1732135038402 2024-11-20T20:37:18,404 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/.initializing 2024-11-20T20:37:18,404 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/WALs/d7d59561a64e,37237,1732135038078 2024-11-20T20:37:18,408 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C37237%2C1732135038078, suffix=, logDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/WALs/d7d59561a64e,37237,1732135038078, archiveDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/oldWALs, maxLogs=10 2024-11-20T20:37:18,409 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C37237%2C1732135038078.1732135038409 2024-11-20T20:37:18,416 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/WALs/d7d59561a64e,37237,1732135038078/d7d59561a64e%2C37237%2C1732135038078.1732135038409 2024-11-20T20:37:18,420 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45589:45589),(127.0.0.1/127.0.0.1:35079:35079)] 2024-11-20T20:37:18,421 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T20:37:18,422 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:37:18,422 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,422 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,424 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T20:37:18,426 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:37:18,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:37:18,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T20:37:18,429 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:37:18,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:37:18,430 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,433 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T20:37:18,433 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:37:18,434 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:37:18,434 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T20:37:18,437 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:37:18,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:37:18,441 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,442 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,442 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,444 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,444 DEBUG [master/d7d59561a64e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,444 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T20:37:18,446 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:37:18,448 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:37:18,449 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830664, jitterRate=0.056244730949401855}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T20:37:18,450 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732135038422Initializing all the Stores at 1732135038423 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135038424 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135038424Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135038424Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135038424Cleaning up temporary data from old regions at 1732135038444 (+20 ms)Region opened successfully at 1732135038450 (+6 ms) 2024-11-20T20:37:18,450 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T20:37:18,455 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a36327e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0
2024-11-20T20:37:18,456 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-20T20:37:18,456 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-20T20:37:18,456 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-20T20:37:18,457 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-20T20:37:18,457 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-20T20:37:18,458 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-20T20:37:18,458 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-20T20:37:18,465 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-20T20:37:18,466 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-20T20:37:18,471 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-20T20:37:18,472 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-20T20:37:18,473 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-20T20:37:18,482 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-20T20:37:18,483 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-20T20:37:18,485 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-20T20:37:18,492 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-20T20:37:18,494 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-20T20:37:18,503 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-20T20:37:18,506 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-20T20:37:18,513 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-20T20:37:18,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-20T20:37:18,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-20T20:37:18,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:18,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:18,525 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d7d59561a64e,37237,1732135038078, sessionid=0x10159f728160000, setting cluster-up flag (Was=false)
2024-11-20T20:37:18,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:18,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:18,577 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-20T20:37:18,579 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,37237,1732135038078
2024-11-20T20:37:18,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:18,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:18,630 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-20T20:37:18,635 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,37237,1732135038078
2024-11-20T20:37:18,638 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-20T20:37:18,645 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-20T20:37:18,646 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-20T20:37:18,646 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-20T20:37:18,646 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d7d59561a64e,37237,1732135038078 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-20T20:37:18,648 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5
2024-11-20T20:37:18,648 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5
2024-11-20T20:37:18,649 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5
2024-11-20T20:37:18,649 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5
2024-11-20T20:37:18,649 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d7d59561a64e:0, corePoolSize=10, maxPoolSize=10
2024-11-20T20:37:18,649 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,649 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2
2024-11-20T20:37:18,649 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,651 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-20T20:37:18,651 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-20T20:37:18,652 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:18,652 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732135068652
2024-11-20T20:37:18,652 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-20T20:37:18,652 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-20T20:37:18,653 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-20T20:37:18,653 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-20T20:37:18,652 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-20T20:37:18,653 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-20T20:37:18,653 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-20T20:37:18,656 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,661 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-20T20:37:18,661 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-20T20:37:18,661 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-20T20:37:18,661 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-20T20:37:18,661 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-20T20:37:18,663 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135038661,5,FailOnTimeoutGroup]
2024-11-20T20:37:18,664 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135038663,5,FailOnTimeoutGroup]
2024-11-20T20:37:18,664 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,664 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-20T20:37:18,664 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,664 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
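The chore lines above all follow the same pattern: a named task re-run at a fixed period in milliseconds. A minimal sketch of that scheduling pattern using plain java.util.concurrent rather than HBase's own ChoreService (the class and task body here are illustrative, not HBase code):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
            // Mirrors "name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled."
            pool.scheduleAtFixedRate(
                () -> System.out.println("LogsCleaner tick: scan oldWALs, delete expired files"),
                0,          // initial delay
                600_000,    // period in ms (10 minutes, as in the log line)
                TimeUnit.MILLISECONDS);
        }
    }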
2024-11-20T20:37:18,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741831_1007 (size=1321)
2024-11-20T20:37:18,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741831_1007 (size=1321)
2024-11-20T20:37:18,668 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-20T20:37:18,669 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7
2024-11-20T20:37:18,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741832_1008 (size=32)
2024-11-20T20:37:18,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741832_1008 (size=32)
2024-11-20T20:37:18,683 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:37:18,685 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(746): ClusterId : 155795b7-0f3d-4e68-b70a-06c6a6a5f04a
2024-11-20T20:37:18,685 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-20T20:37:18,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-20T20:37:18,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-20T20:37:18,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:18,693 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-20T20:37:18,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:18,693 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-20T20:37:18,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-20T20:37:18,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-20T20:37:18,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:18,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:18,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-20T20:37:18,698 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-20T20:37:18,698 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:18,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:18,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-20T20:37:18,701 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-20T20:37:18,702 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:18,702 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:18,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-20T20:37:18,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740
2024-11-20T20:37:18,704 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-20T20:37:18,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740
2024-11-20T20:37:18,704 DEBUG [RS:0;d7d59561a64e:42007 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c020847, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0
2024-11-20T20:37:18,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-20T20:37:18,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-20T20:37:18,706 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-20T20:37:18,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-20T20:37:18,711 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T20:37:18,712 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862428, jitterRate=0.09663434326648712}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-20T20:37:18,713 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732135038683Initializing all the Stores at 1732135038684 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135038685 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135038690 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135038690Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135038690Cleaning up temporary data from old regions at 1732135038706 (+16 ms)Region opened successfully at 1732135038713 (+7 ms)
2024-11-20T20:37:18,713 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-20T20:37:18,714 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-20T20:37:18,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-20T20:37:18,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-20T20:37:18,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-20T20:37:18,714 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-20T20:37:18,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135038713Disabling compacts and flushes for region at 1732135038713Disabling writes for close at 1732135038714 (+1 ms)Writing region close event to WAL at 1732135038714Closed at 1732135038714
2024-11-20T20:37:18,716 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-20T20:37:18,716 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-20T20:37:18,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-20T20:37:18,719 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-20T20:37:18,720 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-20T20:37:18,722 DEBUG [RS:0;d7d59561a64e:42007 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d7d59561a64e:42007
2024-11-20T20:37:18,722 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-20T20:37:18,722 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-20T20:37:18,722 DEBUG [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(832): About to register with Master.
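The FlushLargeStoresPolicy line above states its own fallback rule: with no per-column-family lower bound configured, it uses the region's memstore flush heap size divided by the number of families, and the later "flushSizeLowerBound=16777216" confirms the result. A sketch of that arithmetic (the 64 MB flush heap size is inferred from the numbers, not read from the test's configuration):

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            long memStoreFlushHeapSize = 64L * 1024 * 1024; // assumed 64 MB for this region
            int numFamilies = 4;                            // info, ns, rep_barrier, table
            long lowerBound = memStoreFlushHeapSize / numFamilies;
            System.out.println(lowerBound);                 // 16777216 = 16.0 M, matching the log
        }
    }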
2024-11-20T20:37:18,723 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(2659): reportForDuty to master=d7d59561a64e,37237,1732135038078 with port=42007, startcode=1732135038259
2024-11-20T20:37:18,724 DEBUG [RS:0;d7d59561a64e:42007 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-20T20:37:18,726 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50751, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-11-20T20:37:18,727 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37237 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d7d59561a64e,42007,1732135038259
2024-11-20T20:37:18,727 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37237 {}] master.ServerManager(517): Registering regionserver=d7d59561a64e,42007,1732135038259
2024-11-20T20:37:18,730 DEBUG [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7
2024-11-20T20:37:18,730 DEBUG [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40197
2024-11-20T20:37:18,730 DEBUG [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-20T20:37:18,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T20:37:18,735 DEBUG [RS:0;d7d59561a64e:42007 {}] zookeeper.ZKUtil(111): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d7d59561a64e,42007,1732135038259
2024-11-20T20:37:18,735 WARN [RS:0;d7d59561a64e:42007 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T20:37:18,736 INFO [RS:0;d7d59561a64e:42007 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:37:18,736 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d7d59561a64e,42007,1732135038259]
2024-11-20T20:37:18,736 DEBUG [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/WALs/d7d59561a64e,42007,1732135038259
2024-11-20T20:37:18,741 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-20T20:37:18,748 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-20T20:37:18,749 INFO [RS:0;d7d59561a64e:42007 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-20T20:37:18,749 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
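The MemStoreFlusher line above reports an 880 M global memstore limit with an 836 M low-water mark; 836/880 = 0.95, consistent with a low-mark ratio of 0.95 of the global limit (the ratio is inferred from the two numbers, not read from this test's configuration). A sketch of the relationship:

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            long globalLimit = 880L * 1024 * 1024;              // 880 M, as in the log
            double lowMarkRatio = 0.95;                         // assumed ratio
            long lowMark = (long) (globalLimit * lowMarkRatio);
            System.out.println(lowMark / (1024 * 1024) + " M"); // 836 M, matching the log
        }
    }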
2024-11-20T20:37:18,752 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-20T20:37:18,754 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-20T20:37:18,754 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,754 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,754 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,755 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,756 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:18,756 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3
2024-11-20T20:37:18,756 DEBUG [RS:0;d7d59561a64e:42007 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3
2024-11-20T20:37:18,760 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
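Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above corresponds to a dedicated thread pool per event type, so work of one kind (opening regions, closing regions, flushes) queues independently. A minimal sketch of that pattern with java.util.concurrent, not HBase's own ExecutorService wrapper:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class EventTypePoolSketch {
        public static void main(String[] args) {
            // Mirrors "name=RS_OPEN_REGION-..., corePoolSize=1, maxPoolSize=1":
            // a single-threaded pool keeps region-open work for this server serialized.
            ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            openRegionPool.allowCoreThreadTimeOut(true); // let the idle core thread exit
            openRegionPool.execute(() -> System.out.println("open region task"));
            openRegionPool.shutdown();
        }
    }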
2024-11-20T20:37:18,761 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,761 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,761 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,761 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,761 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,42007,1732135038259-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T20:37:18,783 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-20T20:37:18,783 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,42007,1732135038259-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,784 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,784 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.Replication(171): d7d59561a64e,42007,1732135038259 started
2024-11-20T20:37:18,801 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:18,801 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1482): Serving as d7d59561a64e,42007,1732135038259, RpcServer on d7d59561a64e/172.17.0.2:42007, sessionid=0x10159f728160001
2024-11-20T20:37:18,801 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-20T20:37:18,801 DEBUG [RS:0;d7d59561a64e:42007 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d7d59561a64e,42007,1732135038259
2024-11-20T20:37:18,801 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,42007,1732135038259'
2024-11-20T20:37:18,801 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-20T20:37:18,802 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-20T20:37:18,803 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-20T20:37:18,803 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-20T20:37:18,803 DEBUG [RS:0;d7d59561a64e:42007 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d7d59561a64e,42007,1732135038259
2024-11-20T20:37:18,803 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,42007,1732135038259'
2024-11-20T20:37:18,803 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-20T20:37:18,803 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-20T20:37:18,804 DEBUG [RS:0;d7d59561a64e:42007 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-20T20:37:18,804 INFO [RS:0;d7d59561a64e:42007 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-20T20:37:18,804 INFO [RS:0;d7d59561a64e:42007 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-20T20:37:18,871 WARN [d7d59561a64e:37237 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-20T20:37:18,908 INFO [RS:0;d7d59561a64e:42007 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C42007%2C1732135038259, suffix=, logDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/WALs/d7d59561a64e,42007,1732135038259, archiveDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/oldWALs, maxLogs=32
2024-11-20T20:37:18,913 INFO [RS:0;d7d59561a64e:42007 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C42007%2C1732135038259.1732135038912
2024-11-20T20:37:18,923 INFO [RS:0;d7d59561a64e:42007 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/WALs/d7d59561a64e,42007,1732135038259/d7d59561a64e%2C42007%2C1732135038259.1732135038912
2024-11-20T20:37:18,928 DEBUG [RS:0;d7d59561a64e:42007 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45589:45589),(127.0.0.1/127.0.0.1:35079:35079)]
2024-11-20T20:37:19,121 DEBUG [d7d59561a64e:37237 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-20T20:37:19,122 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d7d59561a64e,42007,1732135038259
2024-11-20T20:37:19,126 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,42007,1732135038259, state=OPENING
2024-11-20T20:37:19,135 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-20T20:37:19,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:19,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:19,146 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-20T20:37:19,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:37:19,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:37:19,146 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,42007,1732135038259}]
2024-11-20T20:37:19,302 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-20T20:37:19,306 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55675, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-20T20:37:19,312 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-20T20:37:19,312 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:37:19,315 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C42007%2C1732135038259.meta, suffix=.meta, logDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/WALs/d7d59561a64e,42007,1732135038259, archiveDir=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/oldWALs, maxLogs=32
2024-11-20T20:37:19,317 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C42007%2C1732135038259.meta.1732135039317.meta
2024-11-20T20:37:19,323 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/WALs/d7d59561a64e,42007,1732135038259/d7d59561a64e%2C42007%2C1732135038259.meta.1732135039317.meta
2024-11-20T20:37:19,324 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45589:45589),(127.0.0.1/127.0.0.1:35079:35079)]
2024-11-20T20:37:19,325 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:37:19,325 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-20T20:37:19,326 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-20T20:37:19,326 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
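The two WAL configuration lines above report rollsize=128 MB against blocksize=256 MB, i.e. the WAL is rolled at half the block size; the 0.5 multiplier is inferred from those two numbers, not read from the test's configuration. A sketch of that relationship:

    public class WalRollSketch {
        public static void main(String[] args) {
            long blockSize = 256L * 1024 * 1024;                  // 256 MB, as in the log
            double rollMultiplier = 0.5;                          // assumed multiplier
            long rollSize = (long) (blockSize * rollMultiplier);
            System.out.println(rollSize / (1024 * 1024) + " MB"); // 128 MB, matching the log
        }
    }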
2024-11-20T20:37:19,326 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-20T20:37:19,326 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:37:19,326 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-20T20:37:19,326 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-20T20:37:19,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-20T20:37:19,329 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-20T20:37:19,329 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:19,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:19,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-20T20:37:19,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-20T20:37:19,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:19,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:19,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-20T20:37:19,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-20T20:37:19,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:19,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:19,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-20T20:37:19,335 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-20T20:37:19,335 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:19,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
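The CompactionConfiguration lines above repeat "ratio 1.200000" with minFilesToCompact:3 and maxFilesToCompact:10 for every column family. The classic size-ratio selection test behind such a ratio is that a store file is eligible for a minor compaction when its size is at most ratio times the combined size of the other candidates; the sketch below shows that textbook rule, not HBase's exact ExploringCompactionPolicy code:

    import java.util.List;

    public class CompactionRatioSketch {
        // A file qualifies when size(file) <= ratio * sum(sizes of the others).
        static boolean eligible(List<Long> sizes, int index, double ratio) {
            long others = 0;
            for (int i = 0; i < sizes.size(); i++) {
                if (i != index) others += sizes.get(i);
            }
            return sizes.get(index) <= ratio * others;
        }

        public static void main(String[] args) {
            List<Long> files = List.of(100L, 40L, 30L);  // store file sizes, arbitrary units
            System.out.println(eligible(files, 0, 1.2)); // 100 > 1.2*70  -> false: skip the big file
            System.out.println(eligible(files, 1, 1.2)); // 40 <= 1.2*130 -> true
        }
    }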
2024-11-20T20:37:19,336 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T20:37:19,337 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740 2024-11-20T20:37:19,338 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740 2024-11-20T20:37:19,340 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T20:37:19,340 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T20:37:19,340 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T20:37:19,343 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T20:37:19,344 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822600, jitterRate=0.04599124193191528}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T20:37:19,344 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T20:37:19,345 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732135039326Writing region info on filesystem at 1732135039326Initializing all the Stores at 1732135039327 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135039327Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135039328 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135039328Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135039328Cleaning up temporary data from old regions at 1732135039340 (+12 ms)Running coprocessor post-open hooks at 1732135039344 (+4 ms)Region opened successfully at 1732135039345 (+1 ms) 2024-11-20T20:37:19,346 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732135039302 2024-11-20T20:37:19,349 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T20:37:19,349 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T20:37:19,350 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,42007,1732135038259 2024-11-20T20:37:19,351 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,42007,1732135038259, state=OPEN 2024-11-20T20:37:19,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T20:37:19,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T20:37:19,383 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d7d59561a64e,42007,1732135038259 2024-11-20T20:37:19,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:37:19,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:37:19,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T20:37:19,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,42007,1732135038259 in 237 msec 2024-11-20T20:37:19,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T20:37:19,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 671 msec 2024-11-20T20:37:19,391 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:37:19,391 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T20:37:19,393 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T20:37:19,393 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,42007,1732135038259, seqNum=-1] 2024-11-20T20:37:19,393 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T20:37:19,395 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52913, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T20:37:19,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 762 msec 2024-11-20T20:37:19,404 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732135039404, completionTime=-1 2024-11-20T20:37:19,404 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T20:37:19,404 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T20:37:19,407 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T20:37:19,407 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732135099407 2024-11-20T20:37:19,407 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732135159407 2024-11-20T20:37:19,407 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T20:37:19,407 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,37237,1732135038078-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:37:19,407 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,37237,1732135038078-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:37:19,408 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,37237,1732135038078-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:37:19,408 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d7d59561a64e:37237, period=300000, unit=MILLISECONDS is enabled. 
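The records above show InitMetaProcedure creating the 'default' and 'hbase' namespaces once hbase:meta is online. As an illustrative aside (not part of the test), a client can create its own namespace through the public Admin API; a minimal sketch, assuming a reachable cluster and a hypothetical 'demo' namespace:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespace {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // 'default' and 'hbase' are reserved and created by the master itself;
      // user code creates additional namespaces like this one ('demo' is made up).
      admin.createNamespace(NamespaceDescriptor.create("demo").build());
    }
  }
}
```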
2024-11-20T20:37:19,408 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T20:37:19,408 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T20:37:19,410 DEBUG [master/d7d59561a64e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T20:37:19,413 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.089sec 2024-11-20T20:37:19,413 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T20:37:19,413 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T20:37:19,413 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T20:37:19,413 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T20:37:19,413 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T20:37:19,413 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,37237,1732135038078-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T20:37:19,413 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,37237,1732135038078-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T20:37:19,416 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T20:37:19,416 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T20:37:19,416 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,37237,1732135038078-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
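The ChoreService lines above register the master's periodic background chores (balancer, catalog janitor, HbckChore, and so on). A minimal sketch of the same mechanism using only the public ChoreService/ScheduledChore API; the chore name, period, and printout are invented for illustration:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws Exception {
    // A ScheduledChore needs a Stoppable so it can stop when its owner stops.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");   // thread-name prefix
    service.scheduleChore(new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore fired");             // periodic work goes here
      }
    });
    TimeUnit.SECONDS.sleep(5);
    service.shutdown();                                // cancels all scheduled chores
  }
}
```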
2024-11-20T20:37:19,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@209f3bad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:37:19,485 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d7d59561a64e,37237,-1 for getting cluster id 2024-11-20T20:37:19,485 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T20:37:19,487 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '155795b7-0f3d-4e68-b70a-06c6a6a5f04a' 2024-11-20T20:37:19,488 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T20:37:19,488 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "155795b7-0f3d-4e68-b70a-06c6a6a5f04a" 2024-11-20T20:37:19,488 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367dc70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:37:19,488 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d7d59561a64e,37237,-1] 2024-11-20T20:37:19,489 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T20:37:19,489 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:37:19,491 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39790, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T20:37:19,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f9c5052, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:37:19,493 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T20:37:19,494 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,42007,1732135038259, seqNum=-1] 2024-11-20T20:37:19,495 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T20:37:19,498 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51954, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T20:37:19,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d7d59561a64e,37237,1732135038078 2024-11-20T20:37:19,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:37:19,501 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:37:19,504 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T20:37:19,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T20:37:19,505 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T20:37:19,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:37:19,505 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:37:19,505 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:37:19,505 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:37:19,505 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T20:37:19,505 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T20:37:19,505 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=131514615, stopped=false 2024-11-20T20:37:19,505 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d7d59561a64e,37237,1732135038078 2024-11-20T20:37:19,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T20:37:19,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T20:37:19,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:19,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:19,524 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T20:37:19,524 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
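The call stack above shows the teardown path: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection before stopping the cluster. A minimal sketch of that lifecycle in a JUnit 4 test, assuming the hbase 3.x HBaseTestingUtil API; the class and test names are hypothetical:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleTest {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL.startMiniCluster();      // boots in-process ZK, HDFS and HBase
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();   // closes connections, then stops the cluster
  }

  @Test
  public void testSomething() {
    // test body would go here
  }
}
```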
2024-11-20T20:37:19,525 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:37:19,525 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:37:19,525 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:37:19,525 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:37:19,525 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd7d59561a64e,42007,1732135038259' ***** 2024-11-20T20:37:19,525 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T20:37:19,525 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T20:37:19,526 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(959): stopping server d7d59561a64e,42007,1732135038259 2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d7d59561a64e:42007. 2024-11-20T20:37:19,526 DEBUG [RS:0;d7d59561a64e:42007 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:37:19,526 DEBUG [RS:0;d7d59561a64e:42007 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
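The watchers re-armed on /hbase/running above are one-shot ZooKeeper watches; deleting that znode is how cluster shutdown is broadcast to master and region servers. A sketch of the same pattern with the plain ZooKeeper client, assuming the test's quorum address 127.0.0.1:58745 is reachable:

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    // The session watcher is a no-op here; only the data watch below matters.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58745", 30000, event -> {});
    // exists() arms a one-shot watch whether or not the znode is present,
    // matching the "Set watcher on znode that does not yet exist" records.
    zk.exists("/hbase/running", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown();
      }
    });
    deleted.await();   // returns once /hbase/running is removed
    zk.close();
  }
}
```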
2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T20:37:19,526 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T20:37:19,527 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-20T20:37:19,527 DEBUG [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T20:37:19,527 DEBUG [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T20:37:19,527 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T20:37:19,527 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T20:37:19,527 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T20:37:19,527 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T20:37:19,527 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T20:37:19,527 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-20T20:37:19,552 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740/.tmp/ns/43fac2a875764266a0b4cd5aae7cc42a is 43, key is default/ns:d/1732135039396/Put/seqid=0 2024-11-20T20:37:19,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741835_1011 (size=5153) 2024-11-20T20:37:19,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741835_1011 (size=5153) 2024-11-20T20:37:19,563 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740/.tmp/ns/43fac2a875764266a0b4cd5aae7cc42a 2024-11-20T20:37:19,571 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740/.tmp/ns/43fac2a875764266a0b4cd5aae7cc42a as hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740/ns/43fac2a875764266a0b4cd5aae7cc42a 2024-11-20T20:37:19,578 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740/ns/43fac2a875764266a0b4cd5aae7cc42a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-20T20:37:19,580 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 52ms, sequenceid=6, compaction requested=false 2024-11-20T20:37:19,580 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T20:37:19,585 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T20:37:19,586 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T20:37:19,586 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T20:37:19,586 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135039527Running coprocessor pre-close hooks at 1732135039527Disabling compacts and flushes for region at 1732135039527Disabling writes for close at 1732135039527Obtaining lock to block concurrent updates at 1732135039527Preparing flush snapshotting stores in 1588230740 at 1732135039527Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732135039528 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732135039529 (+1 ms)Flushing 1588230740/ns: creating writer at 1732135039529Flushing 1588230740/ns: appending metadata at 1732135039552 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1732135039552Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a6f9365: reopening flushed file at 1732135039570 (+18 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 52ms, sequenceid=6, compaction requested=false at 1732135039580 (+10 ms)Writing region close event to WAL at 1732135039581 (+1 ms)Running coprocessor post-close hooks at 1732135039586 (+5 ms)Closed at 1732135039586 2024-11-20T20:37:19,587 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T20:37:19,727 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(976): stopping server d7d59561a64e,42007,1732135038259; all regions closed. 
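The close-time flush above writes the meta memstore to a .tmp HFile, commits it into the store, and records each step in the region close journal. The same flush path can be exercised on demand through the Admin API; a short sketch, assuming a running cluster:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMeta {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the hosting region server to flush hbase:meta's memstores to HFiles,
      // the same write path the shutdown-time flush in the log goes through.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
```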
2024-11-20T20:37:19,728 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,728 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,728 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,728 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,728 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741834_1010 (size=1152) 2024-11-20T20:37:19,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741834_1010 (size=1152) 2024-11-20T20:37:19,734 DEBUG [RS:0;d7d59561a64e:42007 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/oldWALs 2024-11-20T20:37:19,734 INFO [RS:0;d7d59561a64e:42007 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C42007%2C1732135038259.meta:.meta(num 1732135039317) 2024-11-20T20:37:19,734 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,734 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,734 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,734 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,735 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741833_1009 (size=93) 2024-11-20T20:37:19,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741833_1009 (size=93) 2024-11-20T20:37:19,739 DEBUG [RS:0;d7d59561a64e:42007 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/oldWALs 2024-11-20T20:37:19,739 INFO [RS:0;d7d59561a64e:42007 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C42007%2C1732135038259:(num 1732135038912) 2024-11-20T20:37:19,739 DEBUG [RS:0;d7d59561a64e:42007 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:37:19,739 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:37:19,740 INFO [RS:0;d7d59561a64e:42007 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:37:19,740 INFO [RS:0;d7d59561a64e:42007 {}] hbase.ChoreService(370): Chore service for: regionserver/d7d59561a64e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T20:37:19,740 INFO [RS:0;d7d59561a64e:42007 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:37:19,740 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
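Once every edit in a WAL has been flushed, the file is archived to oldWALs, as the AbstractFSWAL lines above show. A roll can also be requested explicitly; a sketch that asks each region server to roll its writer, assuming a running cluster:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWals {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (ServerName rs : admin.getRegionServers()) {
        // Closes the server's current WAL and opens a new one; fully flushed
        // old files then become eligible for archival to oldWALs.
        admin.rollWALWriter(rs);
      }
    }
  }
}
```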
2024-11-20T20:37:19,740 INFO [RS:0;d7d59561a64e:42007 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42007 2024-11-20T20:37:19,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d7d59561a64e,42007,1732135038259 2024-11-20T20:37:19,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:37:19,745 INFO [RS:0;d7d59561a64e:42007 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:37:19,746 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d7d59561a64e,42007,1732135038259] 2024-11-20T20:37:19,766 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d7d59561a64e,42007,1732135038259 already deleted, retry=false 2024-11-20T20:37:19,766 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d7d59561a64e,42007,1732135038259 expired; onlineServers=0 2024-11-20T20:37:19,766 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd7d59561a64e,37237,1732135038078' ***** 2024-11-20T20:37:19,766 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T20:37:19,766 INFO [M:0;d7d59561a64e:37237 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T20:37:19,766 INFO [M:0;d7d59561a64e:37237 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:37:19,766 DEBUG [M:0;d7d59561a64e:37237 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T20:37:19,767 DEBUG [M:0;d7d59561a64e:37237 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T20:37:19,767 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
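The RegionServerTracker above reacts to the deletion of the server's ephemeral znode under /hbase/rs. A sketch of the underlying ZooKeeper primitive with the plain client, using the test's quorum address and a hypothetical znode path; the znode disappears as soon as the session closes:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralPresence {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58745", 30000, event -> {});
    // An ephemeral znode lives only as long as this session; the master's
    // RegionServerTracker treats its disappearance as server expiration.
    zk.create("/demo-ephemeral", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    zk.close();   // session ends, the znode is deleted, watchers see NodeDeleted
  }
}
```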
2024-11-20T20:37:19,767 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135038661 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135038661,5,FailOnTimeoutGroup] 2024-11-20T20:37:19,767 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135038663 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135038663,5,FailOnTimeoutGroup] 2024-11-20T20:37:19,767 INFO [M:0;d7d59561a64e:37237 {}] hbase.ChoreService(370): Chore service for: master/d7d59561a64e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T20:37:19,767 INFO [M:0;d7d59561a64e:37237 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:37:19,767 DEBUG [M:0;d7d59561a64e:37237 {}] master.HMaster(1795): Stopping service threads 2024-11-20T20:37:19,767 INFO [M:0;d7d59561a64e:37237 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T20:37:19,767 INFO [M:0;d7d59561a64e:37237 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T20:37:19,767 INFO [M:0;d7d59561a64e:37237 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T20:37:19,768 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T20:37:19,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T20:37:19,777 DEBUG [M:0;d7d59561a64e:37237 {}] zookeeper.ZKUtil(347): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T20:37:19,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:19,777 WARN [M:0;d7d59561a64e:37237 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T20:37:19,777 INFO [M:0;d7d59561a64e:37237 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/.lastflushedseqids 2024-11-20T20:37:19,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741836_1012 (size=99) 2024-11-20T20:37:19,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741836_1012 (size=99) 2024-11-20T20:37:19,785 INFO [M:0;d7d59561a64e:37237 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T20:37:19,785 INFO [M:0;d7d59561a64e:37237 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T20:37:19,785 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T20:37:19,785 INFO [M:0;d7d59561a64e:37237 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:37:19,785 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:37:19,785 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T20:37:19,786 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:37:19,786 INFO [M:0;d7d59561a64e:37237 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-20T20:37:19,804 DEBUG [M:0;d7d59561a64e:37237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/af55c855c43c47c68a809653131eeb54 is 82, key is hbase:meta,,1/info:regioninfo/1732135039350/Put/seqid=0 2024-11-20T20:37:19,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741837_1013 (size=5672) 2024-11-20T20:37:19,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741837_1013 (size=5672) 2024-11-20T20:37:19,811 INFO [M:0;d7d59561a64e:37237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/af55c855c43c47c68a809653131eeb54 2024-11-20T20:37:19,832 DEBUG [M:0;d7d59561a64e:37237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b06f336f2dcb42179f0e668a205b3073 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732135039403/Put/seqid=0 2024-11-20T20:37:19,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741838_1014 (size=5275) 2024-11-20T20:37:19,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741838_1014 (size=5275) 2024-11-20T20:37:19,839 INFO [M:0;d7d59561a64e:37237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b06f336f2dcb42179f0e668a205b3073 2024-11-20T20:37:19,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:37:19,856 INFO [RS:0;d7d59561a64e:42007 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:37:19,856 INFO [RS:0;d7d59561a64e:42007 {}] regionserver.HRegionServer(1031): Exiting; stopping=d7d59561a64e,42007,1732135038259; zookeeper connection closed. 
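The flush records above address cells in family:qualifier form (info:regioninfo, proc:d). On the client side such cells are written with a Put; this sketch assumes a hypothetical 'demo' table that has an 'info' column family:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      Put put = new Put(Bytes.toBytes("row-1"));
      // One cell at family 'info', qualifier 'state'; it lands in the memstore
      // first, and a later flush persists it into an HFile like those above.
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), Bytes.toBytes("ONLINE"));
      table.put(put);
    }
  }
}
```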
2024-11-20T20:37:19,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42007-0x10159f728160001, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:37:19,856 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6eb10c59 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6eb10c59 2024-11-20T20:37:19,856 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T20:37:19,859 DEBUG [M:0;d7d59561a64e:37237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c7dd7a5cc0ee4d2a80817e82b3cc6b11 is 69, key is d7d59561a64e,42007,1732135038259/rs:state/1732135038728/Put/seqid=0 2024-11-20T20:37:19,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741839_1015 (size=5156) 2024-11-20T20:37:19,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741839_1015 (size=5156) 2024-11-20T20:37:19,865 INFO [M:0;d7d59561a64e:37237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c7dd7a5cc0ee4d2a80817e82b3cc6b11 2024-11-20T20:37:19,884 DEBUG [M:0;d7d59561a64e:37237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c57ea6c431244f67bbe6b8a845f579a2 is 52, key is load_balancer_on/state:d/1732135039503/Put/seqid=0 2024-11-20T20:37:19,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741840_1016 (size=5056) 2024-11-20T20:37:19,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741840_1016 (size=5056) 2024-11-20T20:37:19,890 INFO [M:0;d7d59561a64e:37237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c57ea6c431244f67bbe6b8a845f579a2 2024-11-20T20:37:19,897 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/af55c855c43c47c68a809653131eeb54 as hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/af55c855c43c47c68a809653131eeb54 2024-11-20T20:37:19,903 INFO [M:0;d7d59561a64e:37237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/af55c855c43c47c68a809653131eeb54, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-20T20:37:19,905 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b06f336f2dcb42179f0e668a205b3073 as hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b06f336f2dcb42179f0e668a205b3073 2024-11-20T20:37:19,911 INFO [M:0;d7d59561a64e:37237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b06f336f2dcb42179f0e668a205b3073, entries=3, sequenceid=29, filesize=5.2 K 2024-11-20T20:37:19,913 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c7dd7a5cc0ee4d2a80817e82b3cc6b11 as hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c7dd7a5cc0ee4d2a80817e82b3cc6b11 2024-11-20T20:37:19,919 INFO [M:0;d7d59561a64e:37237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c7dd7a5cc0ee4d2a80817e82b3cc6b11, entries=1, sequenceid=29, filesize=5.0 K 2024-11-20T20:37:19,921 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c57ea6c431244f67bbe6b8a845f579a2 as hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c57ea6c431244f67bbe6b8a845f579a2 2024-11-20T20:37:19,928 INFO [M:0;d7d59561a64e:37237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40197/user/jenkins/test-data/56137543-6e11-c444-cd4a-41bb40e388c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c57ea6c431244f67bbe6b8a845f579a2, entries=1, sequenceid=29, filesize=4.9 K 2024-11-20T20:37:19,929 INFO [M:0;d7d59561a64e:37237 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=29, compaction requested=false 2024-11-20T20:37:19,931 INFO [M:0;d7d59561a64e:37237 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
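Each "Committing ... .tmp/... as ..." pair above is a rename of the freshly written HFile from the region's .tmp directory into the store directory, which makes the flush visible atomically. A sketch of that commit step with the plain Hadoop FileSystem API; the paths are hypothetical:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitTmpFile {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // On HDFS, rename is atomic, so readers see either no file or the whole file.
    Path tmp = new Path("/data/demo/region/.tmp/info/hfile");   // made-up paths
    Path dst = new Path("/data/demo/region/info/hfile");
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```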
2024-11-20T20:37:19,931 DEBUG [M:0;d7d59561a64e:37237 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135039785Disabling compacts and flushes for region at 1732135039785Disabling writes for close at 1732135039785Obtaining lock to block concurrent updates at 1732135039786 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732135039786Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732135039786Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732135039787 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732135039787Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732135039804 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732135039804Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732135039817 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732135039832 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732135039832Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732135039844 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732135039859 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732135039859Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732135039870 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732135039884 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732135039884Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a765614: reopening flushed file at 1732135039896 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71ea168f: reopening flushed file at 1732135039903 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42c6b096: reopening flushed file at 1732135039912 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dccad3a: reopening flushed file at 1732135039920 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=29, compaction requested=false at 1732135039929 (+9 ms)Writing region close event to WAL at 1732135039931 (+2 ms)Closed at 1732135039931 2024-11-20T20:37:19,931 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,932 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,932 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,932 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,932 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:37:19,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38373 is added to blk_1073741830_1006 (size=10311) 2024-11-20T20:37:19,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37597 is added to blk_1073741830_1006 (size=10311) 2024-11-20T20:37:19,935 INFO [M:0;d7d59561a64e:37237 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-20T20:37:19,935 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T20:37:19,935 INFO [M:0;d7d59561a64e:37237 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37237 2024-11-20T20:37:19,935 INFO [M:0;d7d59561a64e:37237 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:37:20,022 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T20:37:20,033 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:37:20,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:37:20,045 INFO [M:0;d7d59561a64e:37237 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:37:20,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37237-0x10159f728160000, quorum=127.0.0.1:58745, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:37:20,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a329c51{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:37:20,050 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d8729f1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:37:20,050 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:37:20,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a17cc25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:37:20,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7130e643{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.log.dir/,STOPPED} 2024-11-20T20:37:20,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:37:20,063 WARN [BP-1578448213-172.17.0.2-1732135035150 heartbeating to localhost/127.0.0.1:40197 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:37:20,063 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T20:37:20,064 WARN [BP-1578448213-172.17.0.2-1732135035150 heartbeating to localhost/127.0.0.1:40197 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1578448213-172.17.0.2-1732135035150 (Datanode Uuid b70d4a19-66aa-443f-b659-047836a92a85) service to localhost/127.0.0.1:40197 2024-11-20T20:37:20,064 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T20:37:20,064 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/data/data3/current/BP-1578448213-172.17.0.2-1732135035150 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:37:20,064 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/data/data4/current/BP-1578448213-172.17.0.2-1732135035150 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:37:20,065 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:37:20,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77ff58b7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:37:20,069 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9d8d6d6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:37:20,069 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:37:20,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39fb5954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:37:20,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5357e169{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.log.dir/,STOPPED} 2024-11-20T20:37:20,072 WARN [BP-1578448213-172.17.0.2-1732135035150 heartbeating to localhost/127.0.0.1:40197 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:37:20,072 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T20:37:20,072 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:37:20,072 WARN [BP-1578448213-172.17.0.2-1732135035150 heartbeating to localhost/127.0.0.1:40197 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1578448213-172.17.0.2-1732135035150 (Datanode Uuid 1af8496b-6fd3-42af-8fee-66f549a04cdb) service to localhost/127.0.0.1:40197
2024-11-20T20:37:20,073 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/data/data1/current/BP-1578448213-172.17.0.2-1732135035150 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:20,073 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/cluster_82c09e8d-7a01-1943-eb7a-b3386e765784/data/data2/current/BP-1578448213-172.17.0.2-1732135035150 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:20,074 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:37:20,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5bc93f4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:37:20,082 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ec80534{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:37:20,082 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:37:20,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69fa0c88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:37:20,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5686c999{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.log.dir/,STOPPED}
2024-11-20T20:37:20,089 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-20T20:37:20,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-20T20:37:20,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T20:37:20,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.log.dir so I do NOT create it in target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761
2024-11-20T20:37:20,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5efc2c9d-ff07-c207-f5d0-cc8ffef45251/hadoop.tmp.dir so I do NOT create it in target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761
2024-11-20T20:37:20,106 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4, deleteOnExit=true
2024-11-20T20:37:20,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-20T20:37:20,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/test.cache.data in system properties and HBase conf
2024-11-20T20:37:20,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.tmp.dir in system properties and HBase conf
2024-11-20T20:37:20,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir in system properties and HBase conf
2024-11-20T20:37:20,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-20T20:37:20,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-20T20:37:20,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-20T20:37:20,107 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-20T20:37:20,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:37:20,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/nfs.dump.dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/java.io.tmpdir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-20T20:37:20,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-20T20:37:20,121 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:37:20,401 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:37:20,408 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:37:20,412 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:37:20,412 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:37:20,412 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:37:20,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:37:20,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52b2db2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:37:20,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d3a3315{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:37:20,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@279f484a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/java.io.tmpdir/jetty-localhost-37169-hadoop-hdfs-3_4_1-tests_jar-_-any-13242534452860213049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:37:20,508 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40334b2a{HTTP/1.1, (http/1.1)}{localhost:37169}
2024-11-20T20:37:20,508 INFO [Time-limited test {}] server.Server(415): Started @114215ms
2024-11-20T20:37:20,520 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:37:20,757 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:37:20,761 INFO [regionserver/d7d59561a64e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-20T20:37:20,762 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:37:20,763 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:37:20,763 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:37:20,763 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:37:20,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46acec94{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:37:20,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ecffd8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:37:20,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f809e08{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/java.io.tmpdir/jetty-localhost-38693-hadoop-hdfs-3_4_1-tests_jar-_-any-9995344389737863666/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:20,863 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@463279{HTTP/1.1, (http/1.1)}{localhost:38693}
2024-11-20T20:37:20,863 INFO [Time-limited test {}] server.Server(415): Started @114570ms
2024-11-20T20:37:20,865 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:37:20,893 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:37:20,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:37:20,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:37:20,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:37:20,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:37:20,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79bab151{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:37:20,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fbfd6e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:37:20,991 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@622112cc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/java.io.tmpdir/jetty-localhost-41085-hadoop-hdfs-3_4_1-tests_jar-_-any-17995763394018297320/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:20,992 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74ca9210{HTTP/1.1, (http/1.1)}{localhost:41085}
2024-11-20T20:37:20,992 INFO [Time-limited test {}] server.Server(415): Started @114698ms
2024-11-20T20:37:20,993 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:37:22,028 WARN [Thread-675 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data2/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation,
2024-11-20T20:37:22,028 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data1/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation,
2024-11-20T20:37:22,054 WARN [Thread-638 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:37:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78b73f604ab11a1b with lease ID 0xfb1b17cde5bf6377: Processing first storage report for DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e from datanode DatanodeRegistration(127.0.0.1:44305, datanodeUuid=8f10fa94-38eb-4e68-a103-62af83273be6, infoPort=44639, infoSecurePort=0, ipcPort=36261, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132)
2024-11-20T20:37:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78b73f604ab11a1b with lease ID 0xfb1b17cde5bf6377: from storage DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e node DatanodeRegistration(127.0.0.1:44305, datanodeUuid=8f10fa94-38eb-4e68-a103-62af83273be6, infoPort=44639, infoSecurePort=0, ipcPort=36261, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:37:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78b73f604ab11a1b with lease ID 0xfb1b17cde5bf6377: Processing first storage report for DS-5877026c-7e9d-4bbf-8121-3730fe8b3304 from datanode DatanodeRegistration(127.0.0.1:44305, datanodeUuid=8f10fa94-38eb-4e68-a103-62af83273be6, infoPort=44639, infoSecurePort=0, ipcPort=36261, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132)
2024-11-20T20:37:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78b73f604ab11a1b with lease ID 0xfb1b17cde5bf6377: from storage DS-5877026c-7e9d-4bbf-8121-3730fe8b3304 node DatanodeRegistration(127.0.0.1:44305, datanodeUuid=8f10fa94-38eb-4e68-a103-62af83273be6, infoPort=44639, infoSecurePort=0, ipcPort=36261, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:37:22,165 WARN [Thread-686 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data4/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation,
2024-11-20T20:37:22,165 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data3/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation,
2024-11-20T20:37:22,180 WARN [Thread-661 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:37:22,183 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bd16667d42290a with lease ID 0xfb1b17cde5bf6378: Processing first storage report for DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae from datanode DatanodeRegistration(127.0.0.1:40343, datanodeUuid=2f069166-49cc-4b43-8675-73d4df62dda0, infoPort=40227, infoSecurePort=0, ipcPort=43753, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132)
2024-11-20T20:37:22,183 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bd16667d42290a with lease ID 0xfb1b17cde5bf6378: from storage DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae node DatanodeRegistration(127.0.0.1:40343, datanodeUuid=2f069166-49cc-4b43-8675-73d4df62dda0, infoPort=40227, infoSecurePort=0, ipcPort=43753, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:37:22,183 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bd16667d42290a with lease ID 0xfb1b17cde5bf6378: Processing first storage report for DS-d9b9bbc2-356b-4db3-951b-ed9cef04c39e from datanode DatanodeRegistration(127.0.0.1:40343, datanodeUuid=2f069166-49cc-4b43-8675-73d4df62dda0, infoPort=40227, infoSecurePort=0, ipcPort=43753, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132)
2024-11-20T20:37:22,183 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bd16667d42290a with lease ID 0xfb1b17cde5bf6378: from storage DS-d9b9bbc2-356b-4db3-951b-ed9cef04c39e node DatanodeRegistration(127.0.0.1:40343, datanodeUuid=2f069166-49cc-4b43-8675-73d4df62dda0, infoPort=40227, infoSecurePort=0, ipcPort=43753, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:37:22,230 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761
2024-11-20T20:37:22,232 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/zookeeper_0, clientPort=52190, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-20T20:37:22,233 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52190
2024-11-20T20:37:22,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:22,235 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:22,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:37:22,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:37:22,249 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5 with version=8
2024-11-20T20:37:22,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase-staging
2024-11-20T20:37:22,252 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:37:22,253 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:22,253 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:22,253 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:37:22,253 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:22,253 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:37:22,253 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-20T20:37:22,253 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:37:22,254 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38137
2024-11-20T20:37:22,255 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38137 connecting to ZooKeeper ensemble=127.0.0.1:52190
2024-11-20T20:37:22,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:381370x0, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:37:22,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38137-0x10159f738630000 connected
2024-11-20T20:37:22,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:22,400 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:22,404 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:37:22,404 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5, hbase.cluster.distributed=false
2024-11-20T20:37:22,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:37:22,408 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38137
2024-11-20T20:37:22,408 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38137
2024-11-20T20:37:22,410 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38137
2024-11-20T20:37:22,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38137
2024-11-20T20:37:22,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38137
2024-11-20T20:37:22,427 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:37:22,427 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:22,427 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:22,428 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:37:22,428 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:22,428 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:37:22,428 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T20:37:22,428 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:37:22,429 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39199
2024-11-20T20:37:22,430 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39199 connecting to ZooKeeper ensemble=127.0.0.1:52190
2024-11-20T20:37:22,431 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:22,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:22,439 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391990x0, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:37:22,440 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:37:22,440 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39199-0x10159f738630001 connected
2024-11-20T20:37:22,440 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-20T20:37:22,441 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-20T20:37:22,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T20:37:22,443 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:37:22,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39199
2024-11-20T20:37:22,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39199
2024-11-20T20:37:22,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39199
2024-11-20T20:37:22,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39199
2024-11-20T20:37:22,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39199
2024-11-20T20:37:22,460 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d7d59561a64e:38137
2024-11-20T20:37:22,460 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d7d59561a64e,38137,1732135042252
2024-11-20T20:37:22,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:37:22,471 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:37:22,472 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d7d59561a64e,38137,1732135042252
2024-11-20T20:37:22,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:22,481 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-20T20:37:22,482 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:22,482 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T20:37:22,482 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d7d59561a64e,38137,1732135042252 from backup master directory
2024-11-20T20:37:22,492 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:37:22,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d7d59561a64e,38137,1732135042252
2024-11-20T20:37:22,492 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
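[Editor's sketch] The StartMiniClusterOption{numMasters=1, ...} line above is the toString of the option object the test hands to HBaseTestingUtil before the second cluster start. A minimal sketch of how a test would typically produce this startup/teardown sequence, assuming the HBase 3.x hbase-testing-util artifact on the classpath (class and builder names match the log; the class name MiniClusterRestartSketch and the test body are placeholders, and this is not the actual test in this run):

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.apache.hadoop.hbase.StartMiniClusterOption;

  public class MiniClusterRestartSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtil util = new HBaseTestingUtil();
      // Mirrors the logged option set: 1 master, 1 region server, 2 datanodes, 1 ZK server.
      StartMiniClusterOption option = StartMiniClusterOption.builder()
          .numMasters(1)
          .numRegionServers(1)
          .numDataNodes(2)
          .numZkServers(1)
          .createRootDir(false)  // matches createRootDir=false in the log
          .createWALDir(false)   // matches createWALDir=false in the log
          .build();
      util.startMiniCluster(option);   // drives the DFS/ZK/master startup lines above
      try {
        // ... test body (placeholder) ...
      } finally {
        util.shutdownMiniCluster();    // drives the "Minicluster is down" teardown lines
      }
    }
  }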
2024-11-20T20:37:22,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:37:22,492 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d7d59561a64e,38137,1732135042252
2024-11-20T20:37:22,497 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/hbase.id] with ID: 0d734dc0-6efb-4a14-aefb-c16c29e0442b
2024-11-20T20:37:22,497 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/.tmp/hbase.id
2024-11-20T20:37:22,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741826_1002 (size=42)
2024-11-20T20:37:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741826_1002 (size=42)
2024-11-20T20:37:22,504 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/.tmp/hbase.id]:[hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/hbase.id]
2024-11-20T20:37:22,517 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:22,517 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-20T20:37:22,518 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
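[Editor's sketch] The three FSUtils lines above (620/625/634) show the cluster ID being written to a .tmp path and then moved onto hbase.id, i.e. a write-to-temp-then-rename pattern that keeps readers from ever seeing a half-written file. A standalone sketch of that pattern with the public Hadoop FileSystem API (the ID literal comes from the log; the class name AtomicIdFileWrite and the paths are placeholders, and this is not HBase's internal FSUtils code):

  import java.nio.charset.StandardCharsets;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class AtomicIdFileWrite {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      Path target = new Path("/user/jenkins/test-data/hbase.id");      // placeholder
      Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");    // placeholder
      try (FSDataOutputStream out = fs.create(tmp, true)) {
        // ID value taken from the logged line, for illustration only
        out.write("0d734dc0-6efb-4a14-aefb-c16c29e0442b".getBytes(StandardCharsets.UTF_8));
      }
      // HDFS rename is atomic, so the target either has the full content or does not exist
      if (!fs.rename(tmp, target)) {
        throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
      }
    }
  }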
2024-11-20T20:37:22,523 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:22,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:37:22,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:37:22,531 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T20:37:22,532 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-20T20:37:22,532 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:37:22,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:37:22,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:37:22,542 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store
2024-11-20T20:37:22,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:37:22,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:37:22,551 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:37:22,551 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T20:37:22,551 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:37:22,551 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:37:22,551 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T20:37:22,551 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:37:22,552 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
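[Editor's sketch] The 'master:store' schema dumped above is the standard toString of an HBase table descriptor. For reference, a hedged sketch of building a descriptor with the same 'info' family settings through the public client API (the builder calls are standard HBase client API; treating them as equivalent to the master's internal construction of this local region is an assumption, and the class name MasterStoreDescriptorSketch is a placeholder):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MasterStoreDescriptorSketch {
    public static void main(String[] args) {
      // Rebuilds only the 'info' family from the logged schema:
      // VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => 8192,
      // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL'.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("master", "store"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setInMemory(true)
              .setBlocksize(8192)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setBloomFilterType(BloomType.ROWCOL)
              .build())
          .build();
      System.out.println(desc); // prints a descriptor string in the same format as the log
    }
  }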
2024-11-20T20:37:22,552 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135042551Disabling compacts and flushes for region at 1732135042551Disabling writes for close at 1732135042551Writing region close event to WAL at 1732135042552 (+1 ms)Closed at 1732135042552
2024-11-20T20:37:22,553 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/.initializing
2024-11-20T20:37:22,553 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252
2024-11-20T20:37:22,557 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C38137%2C1732135042252, suffix=, logDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252, archiveDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/oldWALs, maxLogs=10
2024-11-20T20:37:22,557 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:37:22,564 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:37:22,569 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44639:44639),(127.0.0.1/127.0.0.1:40227:40227)]
2024-11-20T20:37:22,571 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:37:22,571 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:37:22,571 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,572 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,573 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-20T20:37:22,575 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:22,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-20T20:37:22,577 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:37:22,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-20T20:37:22,580 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:37:22,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-20T20:37:22,582 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:37:22,583 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,583 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,584 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,586 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,586 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,586 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-20T20:37:22,587 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:37:22,590 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T20:37:22,590 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802865, jitterRate=0.020896106958389282}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-20T20:37:22,591 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732135042572Initializing all the Stores at 1732135042573 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135042573Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135042573Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135042573Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135042573Cleaning up temporary data from old regions at 1732135042586 (+13 ms)Region opened successfully at 1732135042591 (+5 ms)
2024-11-20T20:37:22,592 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-20T20:37:22,596 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f4de052, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0
2024-11-20T20:37:22,597 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-20T20:37:22,597 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-20T20:37:22,597 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-20T20:37:22,597 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-20T20:37:22,598 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-20T20:37:22,598 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-20T20:37:22,598 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-20T20:37:22,601 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-20T20:37:22,601 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-20T20:37:22,735 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-20T20:37:22,736 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-20T20:37:22,737 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-20T20:37:22,744 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-20T20:37:22,745 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-20T20:37:22,746 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-20T20:37:22,755 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-20T20:37:22,757 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-20T20:37:22,766 DEBUG
[master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T20:37:22,769 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T20:37:22,776 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T20:37:22,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:37:22,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:22,787 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:37:22,787 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:22,787 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d7d59561a64e,38137,1732135042252, sessionid=0x10159f738630000, setting cluster-up flag (Was=false) 2024-11-20T20:37:22,850 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:22,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:22,881 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T20:37:22,883 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,38137,1732135042252 2024-11-20T20:37:22,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:22,903 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:37:22,934 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T20:37:22,936 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,38137,1732135042252 2024-11-20T20:37:22,938 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T20:37:22,940 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T20:37:22,940 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T20:37:22,940 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T20:37:22,941 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d7d59561a64e,38137,1732135042252 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T20:37:22,942 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:37:22,943 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:37:22,943 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:37:22,943 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:37:22,943 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d7d59561a64e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T20:37:22,943 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:37:22,943 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:37:22,944 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T20:37:22,948 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:37:22,948 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T20:37:22,949 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732135072948 2024-11-20T20:37:22,949 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T20:37:22,949 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T20:37:22,949 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T20:37:22,949 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T20:37:22,949 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T20:37:22,949 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T20:37:22,949 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-20T20:37:22,950 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,950 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-20T20:37:22,950 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-20T20:37:22,950 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-20T20:37:22,950 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(746): ClusterId : 0d734dc0-6efb-4a14-aefb-c16c29e0442b
2024-11-20T20:37:22,950 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-20T20:37:22,950 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-20T20:37:22,951 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-20T20:37:22,951 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-20T20:37:22,951 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135042951,5,FailOnTimeoutGroup]
2024-11-20T20:37:22,956 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-20T20:37:22,956 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-20T20:37:22,956 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135042951,5,FailOnTimeoutGroup]
2024-11-20T20:37:22,956 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:22,956 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-20T20:37:22,956 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:22,956 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:22,967 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-20T20:37:22,967 DEBUG [RS:0;d7d59561a64e:39199 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58755364, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0
2024-11-20T20:37:22,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741831_1007 (size=1321)
2024-11-20T20:37:22,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741831_1007 (size=1321)
2024-11-20T20:37:22,976 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-20T20:37:22,976 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5
2024-11-20T20:37:22,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741832_1008 (size=32)
2024-11-20T20:37:22,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741832_1008 (size=32)
2024-11-20T20:37:22,985 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:37:22,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-20T20:37:22,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-20T20:37:22,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:22,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-20T20:37:22,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-20T20:37:22,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,991 DEBUG [RS:0;d7d59561a64e:39199 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d7d59561a64e:39199
2024-11-20T20:37:22,991 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-20T20:37:22,991 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-20T20:37:22,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:22,991 DEBUG [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-20T20:37:22,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-20T20:37:22,992 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(2659): reportForDuty to master=d7d59561a64e,38137,1732135042252 with port=39199, startcode=1732135042427
2024-11-20T20:37:22,992 DEBUG [RS:0;d7d59561a64e:39199 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-20T20:37:22,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-20T20:37:22,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:22,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-20T20:37:22,995 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-20T20:37:22,995 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:22,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:22,996 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-20T20:37:22,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740
2024-11-20T20:37:22,997 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37127, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-11-20T20:37:22,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740
2024-11-20T20:37:22,998 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38137 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d7d59561a64e,39199,1732135042427
2024-11-20T20:37:22,998 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38137 {}] master.ServerManager(517): Registering regionserver=d7d59561a64e,39199,1732135042427
2024-11-20T20:37:22,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-20T20:37:22,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-20T20:37:23,000 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-20T20:37:23,000 DEBUG [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5
2024-11-20T20:37:23,000 DEBUG [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44749
2024-11-20T20:37:23,000 DEBUG [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-20T20:37:23,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-20T20:37:23,005 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T20:37:23,005 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703418, jitterRate=-0.10555873811244965}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-20T20:37:23,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732135042985Initializing all the Stores at 1732135042986 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135042986Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135042986Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135042986Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135042986Cleaning up temporary data from old regions at 1732135042999 (+13 ms)Region opened successfully at 1732135043006 (+7 ms)
2024-11-20T20:37:23,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-20T20:37:23,007 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-20T20:37:23,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-20T20:37:23,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-20T20:37:23,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-20T20:37:23,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T20:37:23,008 DEBUG [RS:0;d7d59561a64e:39199 {}] zookeeper.ZKUtil(111): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d7d59561a64e,39199,1732135042427
2024-11-20T20:37:23,008 WARN [RS:0;d7d59561a64e:39199 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T20:37:23,008 INFO [RS:0;d7d59561a64e:39199 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:37:23,009 DEBUG [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427
2024-11-20T20:37:23,009 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d7d59561a64e,39199,1732135042427]
2024-11-20T20:37:23,010 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-20T20:37:23,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135043007Disabling compacts and flushes for region at 1732135043007Disabling writes for close at 1732135043007Writing region close event to WAL at 1732135043010 (+3 ms)Closed at 1732135043010
2024-11-20T20:37:23,012 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-20T20:37:23,012 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-20T20:37:23,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-20T20:37:23,013 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-20T20:37:23,014 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-20T20:37:23,043 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-20T20:37:23,048 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-20T20:37:23,049 INFO [RS:0;d7d59561a64e:39199 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-20T20:37:23,050 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,050 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-20T20:37:23,051 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-20T20:37:23,051 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,051 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,051 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3
2024-11-20T20:37:23,052 DEBUG [RS:0;d7d59561a64e:39199 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3
2024-11-20T20:37:23,057 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,057 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,057 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,057 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,057 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,057 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39199,1732135042427-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T20:37:23,075 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-20T20:37:23,075 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39199,1732135042427-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,076 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,076 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.Replication(171): d7d59561a64e,39199,1732135042427 started
2024-11-20T20:37:23,098 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,098 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1482): Serving as d7d59561a64e,39199,1732135042427, RpcServer on d7d59561a64e/172.17.0.2:39199, sessionid=0x10159f738630001
2024-11-20T20:37:23,099 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-20T20:37:23,099 DEBUG [RS:0;d7d59561a64e:39199 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d7d59561a64e,39199,1732135042427
2024-11-20T20:37:23,099 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,39199,1732135042427'
2024-11-20T20:37:23,099 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-20T20:37:23,100 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-20T20:37:23,101 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-20T20:37:23,101 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-20T20:37:23,101 DEBUG [RS:0;d7d59561a64e:39199 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d7d59561a64e,39199,1732135042427
2024-11-20T20:37:23,101 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,39199,1732135042427'
2024-11-20T20:37:23,102 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-20T20:37:23,103 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-20T20:37:23,103 DEBUG [RS:0;d7d59561a64e:39199 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-20T20:37:23,104 INFO [RS:0;d7d59561a64e:39199 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-20T20:37:23,104 INFO [RS:0;d7d59561a64e:39199 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-20T20:37:23,194 WARN [d7d59561a64e:38137 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-20T20:37:23,207 INFO [RS:0;d7d59561a64e:39199 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C39199%2C1732135042427, suffix=, logDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427, archiveDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs, maxLogs=32
2024-11-20T20:37:23,208 INFO [RS:0;d7d59561a64e:39199 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:37:23,215 INFO [RS:0;d7d59561a64e:39199 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:37:23,226 DEBUG [RS:0;d7d59561a64e:39199 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40227:40227),(127.0.0.1/127.0.0.1:44639:44639)]
2024-11-20T20:37:23,444 DEBUG [d7d59561a64e:38137 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-20T20:37:23,445 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d7d59561a64e,39199,1732135042427
2024-11-20T20:37:23,447 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,39199,1732135042427, state=OPENING
2024-11-20T20:37:23,471 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-20T20:37:23,555 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:23,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:37:23,556 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-20T20:37:23,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,39199,1732135042427}]
2024-11-20T20:37:23,556 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:37:23,556 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:37:23,711 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-20T20:37:23,716 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33115, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-20T20:37:23,722 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-20T20:37:23,722 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:37:23,724 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C39199%2C1732135042427.meta, suffix=.meta, logDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427, archiveDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs, maxLogs=32
2024-11-20T20:37:23,724 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:37:23,730 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:37:23,733 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40227:40227),(127.0.0.1/127.0.0.1:44639:44639)]
2024-11-20T20:37:23,736 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:37:23,737 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-20T20:37:23,737 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-20T20:37:23,737 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-20T20:37:23,737 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-20T20:37:23,737 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:37:23,737 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-20T20:37:23,737 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-20T20:37:23,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-20T20:37:23,740 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-20T20:37:23,740 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:23,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:23,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-20T20:37:23,741 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-20T20:37:23,741 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:23,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:23,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-20T20:37:23,742 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-20T20:37:23,743 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:23,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:23,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-20T20:37:23,744 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-20T20:37:23,744 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:23,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:37:23,745 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-20T20:37:23,746 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740
2024-11-20T20:37:23,747 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740
2024-11-20T20:37:23,749 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-20T20:37:23,749 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-20T20:37:23,749 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-20T20:37:23,750 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-20T20:37:23,751 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719945, jitterRate=-0.08454298973083496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-20T20:37:23,751 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-20T20:37:23,752 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732135043737Writing region info on filesystem at 1732135043737Initializing all the Stores at 1732135043738 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135043738Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135043739 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135043739Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135043739Cleaning up temporary data from old regions at 1732135043749 (+10 ms)Running coprocessor post-open hooks at 1732135043751 (+2 ms)Region opened successfully at 1732135043752 (+1 ms)
2024-11-20T20:37:23,753 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732135043710
2024-11-20T20:37:23,756 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-20T20:37:23,756 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-20T20:37:23,757 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,39199,1732135042427
2024-11-20T20:37:23,757 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,39199,1732135042427, state=OPEN
2024-11-20T20:37:23,799 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-20T20:37:23,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-20T20:37:23,799 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d7d59561a64e,39199,1732135042427
2024-11-20T20:37:23,799 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:37:23,799 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:37:23,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-20T20:37:23,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,39199,1732135042427 in 243 msec
2024-11-20T20:37:23,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-20T20:37:23,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 791 msec
2024-11-20T20:37:23,807 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-20T20:37:23,807 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-20T20:37:23,808 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-20T20:37:23,808 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,39199,1732135042427, seqNum=-1]
2024-11-20T20:37:23,809 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-20T20:37:23,810 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50003, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-20T20:37:23,816 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 875 msec
2024-11-20T20:37:23,816 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732135043816, completionTime=-1
2024-11-20T20:37:23,816 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-20T20:37:23,816 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster...
2024-11-20T20:37:23,818 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1
2024-11-20T20:37:23,818 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732135103818
2024-11-20T20:37:23,818 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732135163818
2024-11-20T20:37:23,818 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec
2024-11-20T20:37:23,818 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38137,1732135042252-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,818 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38137,1732135042252-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,818 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38137,1732135042252-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,818 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d7d59561a64e:38137, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,819 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,819 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,820 DEBUG [master/d7d59561a64e:0.Chore.1 {}] janitor.CatalogJanitor(180): 
2024-11-20T20:37:23,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.330sec
2024-11-20T20:37:23,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-20T20:37:23,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-20T20:37:23,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-20T20:37:23,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-20T20:37:23,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-20T20:37:23,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38137,1732135042252-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T20:37:23,823 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38137,1732135042252-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-20T20:37:23,825 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-20T20:37:23,825 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-20T20:37:23,825 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38137,1732135042252-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
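InitMetaProcedure above created the default and hbase namespaces during meta bootstrap. For comparison, the user-level equivalent goes through the Admin API; a minimal sketch, assuming admin comes from ConnectionFactory.createConnection(conf).getAdmin() and using a made-up namespace name:

    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;

    // Sketch only: client-side counterpart of the namespace creation above;
    // "test_ns" is a hypothetical name, not from this log.
    void createTestNamespace(Admin admin) throws java.io.IOException {
        admin.createNamespace(NamespaceDescriptor.create("test_ns").build());
    }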
2024-11-20T20:37:23,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a6b9e3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:37:23,851 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d7d59561a64e,38137,-1 for getting cluster id
2024-11-20T20:37:23,851 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-20T20:37:23,853 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0d734dc0-6efb-4a14-aefb-c16c29e0442b'
2024-11-20T20:37:23,853 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-20T20:37:23,854 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0d734dc0-6efb-4a14-aefb-c16c29e0442b"
2024-11-20T20:37:23,854 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f9ef2e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:37:23,854 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d7d59561a64e,38137,-1]
2024-11-20T20:37:23,854 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-20T20:37:23,855 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:37:23,856 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-20T20:37:23,857 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@664ad8e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:37:23,857 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-20T20:37:23,858 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,39199,1732135042427, seqNum=-1]
2024-11-20T20:37:23,858 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-20T20:37:23,860 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56120, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-20T20:37:23,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d7d59561a64e,38137,1732135042252
2024-11-20T20:37:23,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:23,865 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-20T20:37:23,885 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:37:23,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:23,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:23,885 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:37:23,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:37:23,886 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:37:23,886 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T20:37:23,886 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:37:23,886 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35431
2024-11-20T20:37:23,888 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35431 connecting to ZooKeeper ensemble=127.0.0.1:52190
2024-11-20T20:37:23,889 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:23,890 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:37:23,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354310x0, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:37:23,913 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35431-0x10159f738630002 connected
2024-11-20T20:37:23,913 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on existing znode=/hbase/running
2024-11-20T20:37:23,913 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT
2024-11-20T20:37:23,914 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-20T20:37:23,915 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
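The "Minicluster is up" record and the second server-side Connection above mark the test adding region server RS:1 to the running cluster. A minimal sketch of that sequence, assuming the HBaseTestingUtil API named in the log (method names are assumptions, not confirmed by this log):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    // Sketch under assumptions: startMiniCluster(1) yields the master plus RS:0;
    // the extra call matches the RS:1 startup that follows in the log.
    HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
    TEST_UTIL.startMiniCluster(1);
    TEST_UTIL.getMiniHBaseCluster().startRegionServer();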
2024-11-20T20:37:23,916 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T20:37:23,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:37:23,918 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35431
2024-11-20T20:37:23,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35431
2024-11-20T20:37:23,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35431
2024-11-20T20:37:23,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35431
2024-11-20T20:37:23,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35431
2024-11-20T20:37:23,926 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(746): ClusterId : 0d734dc0-6efb-4a14-aefb-c16c29e0442b
2024-11-20T20:37:23,926 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-20T20:37:23,935 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-20T20:37:23,935 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-20T20:37:23,946 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-20T20:37:23,947 DEBUG [RS:1;d7d59561a64e:35431 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28a6385a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0
2024-11-20T20:37:23,962 DEBUG [RS:1;d7d59561a64e:35431 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;d7d59561a64e:35431
2024-11-20T20:37:23,962 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-20T20:37:23,962 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-20T20:37:23,962 DEBUG [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-20T20:37:23,963 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(2659): reportForDuty to master=d7d59561a64e,38137,1732135042252 with port=35431, startcode=1732135043885
2024-11-20T20:37:23,963 DEBUG [RS:1;d7d59561a64e:35431 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-20T20:37:23,965 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49643, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-11-20T20:37:23,965 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38137 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d7d59561a64e,35431,1732135043885
2024-11-20T20:37:23,965 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38137 {}] master.ServerManager(517): Registering regionserver=d7d59561a64e,35431,1732135043885
2024-11-20T20:37:23,967 DEBUG [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5
2024-11-20T20:37:23,967 DEBUG [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44749
2024-11-20T20:37:23,967 DEBUG [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-20T20:37:23,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T20:37:23,976 DEBUG [RS:1;d7d59561a64e:35431 {}] zookeeper.ZKUtil(111): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d7d59561a64e,35431,1732135043885
2024-11-20T20:37:23,977 WARN [RS:1;d7d59561a64e:35431 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T20:37:23,977 INFO [RS:1;d7d59561a64e:35431 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:37:23,977 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d7d59561a64e,35431,1732135043885]
2024-11-20T20:37:23,977 DEBUG [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885
2024-11-20T20:37:23,981 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-20T20:37:23,983 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-20T20:37:23,984 INFO [RS:1;d7d59561a64e:35431 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-20T20:37:23,984 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,985 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-20T20:37:23,986 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-20T20:37:23,986 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,986 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,986 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,986 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,986 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,986 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,986 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2
2024-11-20T20:37:23,986 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,987 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,987 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,987 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,987 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,987 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:37:23,987 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3
2024-11-20T20:37:23,987 DEBUG [RS:1;d7d59561a64e:35431 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3
2024-11-20T20:37:23,988 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,988 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,988 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,988 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,988 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:23,988 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,35431,1732135043885-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T20:37:24,006 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-20T20:37:24,006 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,35431,1732135043885-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:24,006 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:24,007 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.Replication(171): d7d59561a64e,35431,1732135043885 started
2024-11-20T20:37:24,025 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:37:24,026 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(1482): Serving as d7d59561a64e,35431,1732135043885, RpcServer on d7d59561a64e/172.17.0.2:35431, sessionid=0x10159f738630002
2024-11-20T20:37:24,026 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-20T20:37:24,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;d7d59561a64e:35431,5,FailOnTimeoutGroup]
2024-11-20T20:37:24,026 DEBUG [RS:1;d7d59561a64e:35431 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d7d59561a64e,35431,1732135043885
2024-11-20T20:37:24,026 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,35431,1732135043885'
2024-11-20T20:37:24,026 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-20T20:37:24,026 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2
2024-11-20T20:37:24,027 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-20T20:37:24,027 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-20T20:37:24,028 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-20T20:37:24,028 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-20T20:37:24,028 DEBUG [RS:1;d7d59561a64e:35431 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d7d59561a64e,35431,1732135043885
2024-11-20T20:37:24,028 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,35431,1732135043885'
2024-11-20T20:37:24,028 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-20T20:37:24,028 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is d7d59561a64e,38137,1732135042252
2024-11-20T20:37:24,028 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-20T20:37:24,028 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@604ab482
2024-11-20T20:37:24,028 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-20T20:37:24,029 DEBUG [RS:1;d7d59561a64e:35431 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-20T20:37:24,029 INFO [RS:1;d7d59561a64e:35431 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-20T20:37:24,029 INFO [RS:1;d7d59561a64e:35431 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-20T20:37:24,031 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36736, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-20T20:37:24,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38137 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-20T20:37:24,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38137 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
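The two TableDescriptorChecker warnings are expected here: log-rolling tests deliberately shrink the region max file size and memstore flush size so that flushes and WAL rolls happen quickly. A minimal sketch reproducing the quoted thresholds (786432 and 8192), assuming the stock configuration keys named in the warnings; production clusters should keep the defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: tiny sizes matching the warning thresholds in the log above.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);       // triggers the MAX_FILESIZE warning
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // triggers the MEMSTORE_FLUSHSIZE warning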
2024-11-20T20:37:24,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38137 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T20:37:24,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38137 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath
2024-11-20T20:37:24,036 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION
2024-11-20T20:37:24,036 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:24,036 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38137 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4
2024-11-20T20:37:24,038 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-20T20:37:24,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38137 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T20:37:24,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741835_1011 (size=393)
2024-11-20T20:37:24,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741835_1011 (size=393)
2024-11-20T20:37:24,051 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e2f07a01674f8c986e38cded8548d2fe, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5
2024-11-20T20:37:24,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40343 is added to blk_1073741836_1012 (size=76)
2024-11-20T20:37:24,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44305 is added to blk_1073741836_1012 (size=76)
2024-11-20T20:37:24,060 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:37:24,060 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing e2f07a01674f8c986e38cded8548d2fe, disabling compactions & flushes
2024-11-20T20:37:24,060 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:24,060 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:24,060 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe. after waiting 0 ms
2024-11-20T20:37:24,060 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:24,060 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:24,060 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for e2f07a01674f8c986e38cded8548d2fe: Waiting for close lock at 1732135044060Disabling compacts and flushes for region at 1732135044060Disabling writes for close at 1732135044060Writing region close event to WAL at 1732135044060Closed at 1732135044060
2024-11-20T20:37:24,062 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META
2024-11-20T20:37:24,062 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732135044062"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732135044062"}]},"ts":"1732135044062"}
2024-11-20T20:37:24,064 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
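The master records the incoming request above as "Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', ...}". A minimal sketch of issuing that same request with the public client API (table and family names taken from the log; admin is assumed to be an open Admin handle):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch only: builds the single-family descriptor dumped in the log above.
    void createTable(Admin admin) throws java.io.IOException {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build());
    }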
2024-11-20T20:37:24,066 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-20T20:37:24,066 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732135044066"}]},"ts":"1732135044066"}
2024-11-20T20:37:24,068 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta
2024-11-20T20:37:24,069 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e2f07a01674f8c986e38cded8548d2fe, ASSIGN}]
2024-11-20T20:37:24,070 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e2f07a01674f8c986e38cded8548d2fe, ASSIGN
2024-11-20T20:37:24,071 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e2f07a01674f8c986e38cded8548d2fe, ASSIGN; state=OFFLINE, location=d7d59561a64e,39199,1732135042427; forceNewPlan=false, retain=false
2024-11-20T20:37:24,132 INFO [RS:1;d7d59561a64e:35431 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C35431%2C1732135043885, suffix=, logDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885, archiveDir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs, maxLogs=32
2024-11-20T20:37:24,133 INFO [RS:1;d7d59561a64e:35431 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:37:24,142 INFO [RS:1;d7d59561a64e:35431 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:37:24,145 DEBUG [RS:1;d7d59561a64e:35431 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44639:44639),(127.0.0.1/127.0.0.1:40227:40227)]
2024-11-20T20:37:24,223 INFO [d7d59561a64e:38137 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-11-20T20:37:24,224 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e2f07a01674f8c986e38cded8548d2fe, regionState=OPENING, regionLocation=d7d59561a64e,39199,1732135042427
2024-11-20T20:37:24,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e2f07a01674f8c986e38cded8548d2fe, ASSIGN because future has completed
2024-11-20T20:37:24,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2f07a01674f8c986e38cded8548d2fe, server=d7d59561a64e,39199,1732135042427}]
2024-11-20T20:37:24,398 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:24,399 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e2f07a01674f8c986e38cded8548d2fe, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:37:24,400 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,400 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:37:24,401 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,401 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,403 INFO [StoreOpener-e2f07a01674f8c986e38cded8548d2fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,404 INFO [StoreOpener-e2f07a01674f8c986e38cded8548d2fe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2f07a01674f8c986e38cded8548d2fe columnFamilyName info
2024-11-20T20:37:24,404 DEBUG [StoreOpener-e2f07a01674f8c986e38cded8548d2fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:37:24,405 INFO [StoreOpener-e2f07a01674f8c986e38cded8548d2fe-1 {}] regionserver.HStore(327): Store=e2f07a01674f8c986e38cded8548d2fe/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:37:24,405 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,406 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,407 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,407 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,407 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,409 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,412 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T20:37:24,412 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e2f07a01674f8c986e38cded8548d2fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714616, jitterRate=-0.0913194864988327}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-20T20:37:24,412 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:24,413 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e2f07a01674f8c986e38cded8548d2fe: Running coprocessor pre-open hook at 1732135044401Writing region info on filesystem at 1732135044401Initializing all the Stores at 1732135044402 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135044402Cleaning up temporary data from old regions at 1732135044407 (+5 ms)Running coprocessor post-open hooks at 1732135044412 (+5 ms)Region opened successfully at 1732135044413 (+1 ms)
2024-11-20T20:37:24,415 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe., pid=6, masterSystemTime=1732135044387
2024-11-20T20:37:24,418 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:24,418 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:24,419 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e2f07a01674f8c986e38cded8548d2fe, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,39199,1732135042427
2024-11-20T20:37:24,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2f07a01674f8c986e38cded8548d2fe, server=d7d59561a64e,39199,1732135042427 because future has completed
2024-11-20T20:37:24,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-20T20:37:24,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e2f07a01674f8c986e38cded8548d2fe, server=d7d59561a64e,39199,1732135042427 in 191 msec
2024-11-20T20:37:24,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-20T20:37:24,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e2f07a01674f8c986e38cded8548d2fe, ASSIGN in 357 msec
2024-11-20T20:37:24,430 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-20T20:37:24,430 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732135044430"}]},"ts":"1732135044430"}
2024-11-20T20:37:24,432 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta
2024-11-20T20:37:24,434 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION
2024-11-20T20:37:24,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 402 msec
2024-11-20T20:37:25,094 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T20:37:25,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:37:25,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:37:25,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:37:25,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:37:26,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-20T20:37:26,486 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-20T20:37:26,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-11-20T20:37:26,487 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer
2024-11-20T20:37:26,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:37:26,488 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-20T20:37:26,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-20T20:37:26,488 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-20T20:37:29,013 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath'
2024-11-20T20:37:34,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38137 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T20:37:34,137 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed
2024-11-20T20:37:34,138 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100
2024-11-20T20:37:34,144 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath
2024-11-20T20:37:34,144 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:34,161 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:37:34,164 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:37:34,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:37:34,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:37:34,165 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:37:34,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7857b965{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:37:34,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1acd305{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:37:34,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2da17d51{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/java.io.tmpdir/jetty-localhost-39105-hadoop-hdfs-3_4_1-tests_jar-_-any-15270368900978934179/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:34,265 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b0604e{HTTP/1.1, (http/1.1)}{localhost:39105}
2024-11-20T20:37:34,265 INFO [Time-limited test {}] server.Server(415): Started @127972ms
2024-11-20T20:37:34,266 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:37:34,295 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T20:37:34,299 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T20:37:34,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T20:37:34,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T20:37:34,300 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T20:37:34,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24a7a164{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,AVAILABLE} 2024-11-20T20:37:34,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4273ed86{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T20:37:34,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2305d82d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/java.io.tmpdir/jetty-localhost-43373-hadoop-hdfs-3_4_1-tests_jar-_-any-3300258251685904561/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:37:34,399 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f334db9{HTTP/1.1, (http/1.1)}{localhost:43373} 2024-11-20T20:37:34,399 INFO [Time-limited test {}] server.Server(415): Started @128106ms 2024-11-20T20:37:34,400 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T20:37:34,431 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T20:37:34,434 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T20:37:34,435 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T20:37:34,435 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T20:37:34,435 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T20:37:34,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bbb3640{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,AVAILABLE} 2024-11-20T20:37:34,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52d0466b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T20:37:34,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c233c6c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/java.io.tmpdir/jetty-localhost-43937-hadoop-hdfs-3_4_1-tests_jar-_-any-11365708966109006764/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:37:34,529 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4db19810{HTTP/1.1, (http/1.1)}{localhost:43937} 2024-11-20T20:37:34,529 INFO [Time-limited test {}] server.Server(415): Started @128236ms 2024-11-20T20:37:34,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T20:37:35,849 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data5/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:35,849 WARN [Thread-870 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data6/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:35,869 WARN [Thread-810 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:37:35,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa7ebb0866359e5d3 with lease ID 0xfb1b17cde5bf6379: Processing first storage report for DS-e964c223-bc83-430b-8acf-39e68464d158 from datanode DatanodeRegistration(127.0.0.1:37843, datanodeUuid=4f0f393a-da03-4209-b7bb-859a6f6e645c, infoPort=37283, infoSecurePort=0, ipcPort=40601, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132) 2024-11-20T20:37:35,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa7ebb0866359e5d3 with lease ID 0xfb1b17cde5bf6379: from storage DS-e964c223-bc83-430b-8acf-39e68464d158 node DatanodeRegistration(127.0.0.1:37843, datanodeUuid=4f0f393a-da03-4209-b7bb-859a6f6e645c, infoPort=37283, infoSecurePort=0, ipcPort=40601, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:37:35,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa7ebb0866359e5d3 with lease ID 0xfb1b17cde5bf6379: Processing first storage report for DS-5fa64d95-6195-427a-92b8-c63c05e97a8b from datanode DatanodeRegistration(127.0.0.1:37843, datanodeUuid=4f0f393a-da03-4209-b7bb-859a6f6e645c, infoPort=37283, infoSecurePort=0, ipcPort=40601, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132) 2024-11-20T20:37:35,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa7ebb0866359e5d3 with lease ID 0xfb1b17cde5bf6379: from storage DS-5fa64d95-6195-427a-92b8-c63c05e97a8b node DatanodeRegistration(127.0.0.1:37843, datanodeUuid=4f0f393a-da03-4209-b7bb-859a6f6e645c, infoPort=37283, infoSecurePort=0, ipcPort=40601, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:37:36,016 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:36,016 WARN [Thread-881 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:36,031 WARN [Thread-832 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:37:36,034 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe615cd460622b085 with lease ID 0xfb1b17cde5bf637a: Processing first storage report for DS-980f86ee-8f41-4692-9903-d9b67a1c440f from datanode DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132) 2024-11-20T20:37:36,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe615cd460622b085 with lease ID 0xfb1b17cde5bf637a: from storage DS-980f86ee-8f41-4692-9903-d9b67a1c440f node DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:37:36,034 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe615cd460622b085 with lease ID 0xfb1b17cde5bf637a: Processing first storage report for DS-81a644ea-ad6b-42bd-8670-19ae4c147993 from datanode DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132) 2024-11-20T20:37:36,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe615cd460622b085 with lease ID 0xfb1b17cde5bf637a: from storage DS-81a644ea-ad6b-42bd-8670-19ae4c147993 node DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:37:36,087 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data9/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:36,087 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data10/current/BP-1565890324-172.17.0.2-1732135040132/current, will proceed with Du for space computation calculation, 2024-11-20T20:37:36,106 WARN [Thread-854 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:37:36,109 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf3c26a216399a89 with lease ID 0xfb1b17cde5bf637b: Processing first storage report for DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7 from datanode DatanodeRegistration(127.0.0.1:38165, datanodeUuid=969adb2a-77d0-48fc-9781-e84cb49d7d72, infoPort=34891, infoSecurePort=0, ipcPort=42855, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132) 2024-11-20T20:37:36,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf3c26a216399a89 with lease ID 0xfb1b17cde5bf637b: from storage DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7 node DatanodeRegistration(127.0.0.1:38165, datanodeUuid=969adb2a-77d0-48fc-9781-e84cb49d7d72, infoPort=34891, infoSecurePort=0, ipcPort=42855, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:37:36,109 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf3c26a216399a89 with lease ID 0xfb1b17cde5bf637b: Processing first storage report for DS-b6646348-5824-46ef-90a8-5db76c0e5ec0 from datanode DatanodeRegistration(127.0.0.1:38165, datanodeUuid=969adb2a-77d0-48fc-9781-e84cb49d7d72, infoPort=34891, infoSecurePort=0, ipcPort=42855, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132) 2024-11-20T20:37:36,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf3c26a216399a89 with lease ID 0xfb1b17cde5bf637b: from storage DS-b6646348-5824-46ef-90a8-5db76c0e5ec0 node DatanodeRegistration(127.0.0.1:38165, datanodeUuid=969adb2a-77d0-48fc-9781-e84cb49d7d72, infoPort=34891, infoSecurePort=0, ipcPort=42855, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T20:37:36,166 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:37:36,166 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T20:37:36,166 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:37:36,166 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:37:36,167 WARN [DataStreamer for file /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 block BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK], DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad. 2024-11-20T20:37:36,167 WARN [DataStreamer for file /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 block BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad. 2024-11-20T20:37:36,167 WARN [DataStreamer for file /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta block BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad. 
2024-11-20T20:37:36,167 WARN [DataStreamer for file /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 block BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK], DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad. 2024-11-20T20:37:36,167 WARN [PacketResponder: BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40343] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,167 WARN [PacketResponder: BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40343] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,168 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:34990 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34990 dst: /127.0.0.1:44305 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,168 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:35006 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35006 dst: /127.0.0.1:44305 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,168 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_344786555_22 at /127.0.0.1:45198 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45198 dst: /127.0.0.1:40343 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,168 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1316283621_22 at /127.0.0.1:45102 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45102 dst: /127.0.0.1:40343 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,168 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45150 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45150 dst: /127.0.0.1:40343 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,168 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45166 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45166 dst: /127.0.0.1:40343 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,169 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1316283621_22 at /127.0.0.1:34954 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34954 dst: /127.0.0.1:44305 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,169 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_344786555_22 at /127.0.0.1:35026 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35026 dst: /127.0.0.1:44305 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,182 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565890324-172.17.0.2-1732135040132 (Datanode Uuid 2f069166-49cc-4b43-8675-73d4df62dda0) service to localhost/127.0.0.1:44749 2024-11-20T20:37:36,238 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data3/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:37:36,239 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data4/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:37:36,240 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@622112cc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:37:36,241 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74ca9210{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:37:36,242 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:37:36,242 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fbfd6e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:37:36,242 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79bab151{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,STOPPED} 2024-11-20T20:37:36,245 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:37:36,246 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at 
org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:37:36,246 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:37:36,246 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:37:36,246 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:37:36,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:46676 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46676 dst: /127.0.0.1:44305 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:46682 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46682 dst: /127.0.0.1:44305 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_344786555_22 at /127.0.0.1:46678 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46678 dst: /127.0.0.1:44305 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:37:36,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1316283621_22 at /127.0.0.1:46680 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46680 dst: /127.0.0.1:44305 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:36,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f809e08{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:36,249 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@463279{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:37:36,249 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:37:36,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ecffd8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:37:36,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46acec94{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,STOPPED}
2024-11-20T20:37:36,251 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:37:36,251 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:37:36,251 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565890324-172.17.0.2-1732135040132 (Datanode Uuid 8f10fa94-38eb-4e68-a103-62af83273be6) service to localhost/127.0.0.1:44749
2024-11-20T20:37:36,251 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:37:36,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data1/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:36,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data2/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:36,252 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:37:36,256 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe., hostname=d7d59561a64e,39199,1732135042427, seqNum=2]
2024-11-20T20:37:36,257 ERROR [FSHLog-0-hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5-prefix:d7d59561a64e,39199,1732135042427 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:36,258 WARN [FSHLog-0-hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5-prefix:d7d59561a64e,39199,1732135042427 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:36,258 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:36,258 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C39199%2C1732135042427:(num 1732135043207) roll requested
2024-11-20T20:37:36,258 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39199%2C1732135042427.1732135056258
2024-11-20T20:37:36,264 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:36,264 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:36,264 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:36,264 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:36,264 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:36,265 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135056258
2024-11-20T20:37:36,265 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:36,265 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:36,266 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-11-20T20:37:36,266 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-11-20T20:37:36,266 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:37:36,268 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37283:37283),(127.0.0.1/127.0.0.1:34891:34891)]
2024-11-20T20:37:36,268 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:37:36,269 WARN [IPC Server handler 2 on default port 44749 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1015
2024-11-20T20:37:36,272 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 after 5ms
2024-11-20T20:37:36,534 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:37,989 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:38,268 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:38,269 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135056258
2024-11-20T20:37:38,270 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741838_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:38,271 WARN [DataStreamer for file /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135056258 block BP-1565890324-172.17.0.2-1732135040132:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:38,271 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:43024 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37843:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43024 dst: /127.0.0.1:37843
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:38,271 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:36578 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38165:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36578 dst: /127.0.0.1:38165
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:38,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2da17d51{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:38,319 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b0604e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:37:38,319 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:37:38,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1acd305{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:37:38,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7857b965{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,STOPPED}
2024-11-20T20:37:38,322 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:37:38,322 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:37:38,322 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565890324-172.17.0.2-1732135040132 (Datanode Uuid 4f0f393a-da03-4209-b7bb-859a6f6e645c) service to localhost/127.0.0.1:44749
2024-11-20T20:37:38,322 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:37:38,323 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data5/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:38,323 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data6/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:38,323 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:37:38,534 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:39,989 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:40,268 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:40,269 WARN [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]
2024-11-20T20:37:40,269 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C39199%2C1732135042427:(num 1732135056258) roll requested
2024-11-20T20:37:40,269 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39199%2C1732135042427.1732135060269
2024-11-20T20:37:40,273 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 after 4007ms
2024-11-20T20:37:40,273 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:40,273 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:40,273 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741839_1021
2024-11-20T20:37:40,277 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:40,280 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:40,281 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:40,281 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741840_1022
2024-11-20T20:37:40,282 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:40,284 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37843
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:40,284 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:36592 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data10]'}, localName='127.0.0.1:38165', datanodeUuid='969adb2a-77d0-48fc-9781-e84cb49d7d72', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741841_1023 to mirror 127.0.0.1:37843
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:40,284 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:40,284 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741841_1023
2024-11-20T20:37:40,284 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:36592 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-20T20:37:40,284 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:36592 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:38165:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36592 dst: /127.0.0.1:38165
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:40,285 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:40,290 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:40,290 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:40,290 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:40,290 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:40,290 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:40,290 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135056258 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135060269
2024-11-20T20:37:40,292 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38865:38865),(127.0.0.1/127.0.0.1:34891:34891)]
2024-11-20T20:37:40,292 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:37:40,292 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135056258 is not closed yet, will try archiving it next time
2024-11-20T20:37:40,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38165 is added to blk_1073741838_1020 (size=2431)
2024-11-20T20:37:40,330 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-11-20T20:37:40,534 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:40,694 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:37:41,990 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:42,145 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67052e87[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38165, datanodeUuid=969adb2a-77d0-48fc-9781-e84cb49d7d72, infoPort=34891, infoSecurePort=0, ipcPort=42855, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741838_1020 to 127.0.0.1:40343 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:42,292 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:42,336 WARN [ResponseProcessor for block BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1024
java.io.IOException: Bad response ERROR for BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:42,337 WARN [DataStreamer for file /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135060269 block BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:42,337 WARN [PacketResponder: BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38165] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:42,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:34674 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34674 dst: /127.0.0.1:36941
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:42,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:36598 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:38165:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36598 dst: /127.0.0.1:38165
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:42,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c233c6c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:42,381 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4db19810{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:37:42,381 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:37:42,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52d0466b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:37:42,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bbb3640{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,STOPPED}
2024-11-20T20:37:42,383 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:37:42,383 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:37:42,384 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:37:42,384 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565890324-172.17.0.2-1732135040132 (Datanode Uuid 969adb2a-77d0-48fc-9781-e84cb49d7d72) service to localhost/127.0.0.1:44749
2024-11-20T20:37:42,384 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data9/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:42,384 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data10/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:37:42,385 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:37:42,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39199 {}] regionserver.HRegion(8855): Flush requested on e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:42,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2f07a01674f8c986e38cded8548d2fe 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T20:37:42,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/48e28e19818e41a188ec22710bca8ecd is 1080, key is row0002/info:/1732135058325/Put/seqid=0
2024-11-20T20:37:42,425 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:42,425 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:42,425 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741843_1026
2024-11-20T20:37:42,426 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:42,427 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:42,427 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:42,427 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741844_1027
2024-11-20T20:37:42,428 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:42,429 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:42,429 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:42,429 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741845_1028
2024-11-20T20:37:42,429 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:42,432 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40343
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:42,432 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44840 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741846_1029 to mirror 127.0.0.1:40343
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:42,432 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:42,432 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741846_1029
2024-11-20T20:37:42,432 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44840 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-20T20:37:42,432 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44840 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44840 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:42,432 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:42,433 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:42,433 WARN [IPC Server handler 4 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:42,433 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:42,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741847_1030 (size=10347)
2024-11-20T20:37:42,535 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:42,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/48e28e19818e41a188ec22710bca8ecd
2024-11-20T20:37:42,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/48e28e19818e41a188ec22710bca8ecd as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/48e28e19818e41a188ec22710bca8ecd
2024-11-20T20:37:42,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/48e28e19818e41a188ec22710bca8ecd, entries=5, sequenceid=11, filesize=10.1 K
2024-11-20T20:37:42,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for e2f07a01674f8c986e38cded8548d2fe in 462ms, sequenceid=11, compaction requested=false
2024-11-20T20:37:42,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2f07a01674f8c986e38cded8548d2fe:
2024-11-20T20:37:43,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39199 {}] regionserver.HRegion(8855): Flush requested on e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:43,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2f07a01674f8c986e38cded8548d2fe 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB
2024-11-20T20:37:43,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/326d2a2f61cd4147ac1c3c3ab544f287 is 1080, key is row0007/info:/1732135062400/Put/seqid=0
2024-11-20T20:37:43,042 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:43,042 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:43,042 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741848_1031
2024-11-20T20:37:43,043 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:43,046 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44305
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:43,046 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44870 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741849_1032 to mirror 127.0.0.1:44305
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:43,046 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:43,046 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741849_1032
2024-11-20T20:37:43,046 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44870 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-20T20:37:43,046 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44870 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44870 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:43,047 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:43,049 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:43,049 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:43,049 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741850_1033
2024-11-20T20:37:43,050 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:43,052 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:43,052 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:43,052 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741851_1034
2024-11-20T20:37:43,053 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:43,054 WARN [IPC Server handler 2 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:43,054 WARN [IPC Server handler 2 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:43,054 WARN [IPC Server handler 2 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:43,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741852_1035 (size=12506)
2024-11-20T20:37:43,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/326d2a2f61cd4147ac1c3c3ab544f287
2024-11-20T20:37:43,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/326d2a2f61cd4147ac1c3c3ab544f287 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/326d2a2f61cd4147ac1c3c3ab544f287
2024-11-20T20:37:43,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/326d2a2f61cd4147ac1c3c3ab544f287, entries=7, sequenceid=24, filesize=12.2 K
2024-11-20T20:37:43,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for e2f07a01674f8c986e38cded8548d2fe in 442ms, sequenceid=24, compaction requested=false
2024-11-20T20:37:43,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2f07a01674f8c986e38cded8548d2fe:
2024-11-20T20:37:43,475 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K
2024-11-20T20:37:43,475 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:43,475 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/326d2a2f61cd4147ac1c3c3ab544f287 because midkey is the same as first or last row
2024-11-20T20:37:43,992 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,293 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,293 WARN [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]
2024-11-20T20:37:44,293 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C39199%2C1732135042427:(num 1732135060269) roll requested
2024-11-20T20:37:44,294 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39199%2C1732135042427.1732135064293
2024-11-20T20:37:44,298 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44305
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44892 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741853_1036 to mirror 127.0.0.1:44305
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:44,298 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:44,298 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741853_1036
2024-11-20T20:37:44,298 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44892 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-20T20:37:44,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44892 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44892 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:44,299 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:44,302 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40343
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,302 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44908 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741854_1037 to mirror 127.0.0.1:40343
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:44,303 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:44,303 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741854_1037
2024-11-20T20:37:44,303 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44908 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-20T20:37:44,304 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:44,303 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44908 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44908 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:44,306 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,306 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:44,306 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741855_1038
2024-11-20T20:37:44,306 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:44,310 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37843
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,309 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44912 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741856_1039 to mirror 127.0.0.1:37843
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:44,310 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:44,310 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44912 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-20T20:37:44,310 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741856_1039
2024-11-20T20:37:44,310 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44912 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44912 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:44,311 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:44,312 WARN [IPC Server handler 1 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:44,312 WARN [IPC Server handler 1 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:44,312 WARN [IPC Server handler 1 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:44,316 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:44,316 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:44,317 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:44,317 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:44,317 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:44,317 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135060269 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135064293
2024-11-20T20:37:44,318 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38865:38865)]
2024-11-20T20:37:44,318 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:37:44,318 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135060269 is not closed yet, will try archiving it next time
2024-11-20T20:37:44,319 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135056258 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs/d7d59561a64e%2C39199%2C1732135042427.1732135056258
2024-11-20T20:37:44,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741842_1025 (size=25992)
2024-11-20T20:37:44,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39199 {}] regionserver.HRegion(8855): Flush requested on e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:44,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2f07a01674f8c986e38cded8548d2fe 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-20T20:37:44,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/c72eb9a950de480baede797b31d4cf17 is 1079, key is tmprow/info:/1732135064454/Put/seqid=0
2024-11-20T20:37:44,464 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,465 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:44,465 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741858_1041
2024-11-20T20:37:44,465 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:44,467 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,468 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:44,468 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741859_1042
2024-11-20T20:37:44,469 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:44,470 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,471 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:44,471 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741860_1043
2024-11-20T20:37:44,472 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:44,475 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44934 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741861_1044 to mirror 127.0.0.1:37843
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:44,475 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37843
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,475 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:44,475 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741861_1044
2024-11-20T20:37:44,475 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44934 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-20T20:37:44,476 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44934 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44934 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:44,476 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:44,477 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:44,477 WARN [IPC Server handler 4 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:44,477 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:44,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741862_1045 (size=6027)
2024-11-20T20:37:44,482 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/c72eb9a950de480baede797b31d4cf17
2024-11-20T20:37:44,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/c72eb9a950de480baede797b31d4cf17 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/c72eb9a950de480baede797b31d4cf17
2024-11-20T20:37:44,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/c72eb9a950de480baede797b31d4cf17, entries=1, sequenceid=34, filesize=5.9 K
2024-11-20T20:37:44,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e2f07a01674f8c986e38cded8548d2fe in 49ms, sequenceid=34, compaction requested=true
2024-11-20T20:37:44,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2f07a01674f8c986e38cded8548d2fe:
2024-11-20T20:37:44,506 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K
2024-11-20T20:37:44,506 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:44,506 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/326d2a2f61cd4147ac1c3c3ab544f287 because midkey is the same as first or last row
2024-11-20T20:37:44,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2f07a01674f8c986e38cded8548d2fe:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:37:44,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:37:44,506 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:37:44,507 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:37:44,508 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HStore(1541): e2f07a01674f8c986e38cded8548d2fe/info is initiating minor compaction (all files)
2024-11-20T20:37:44,508 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2f07a01674f8c986e38cded8548d2fe/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:37:44,508 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/48e28e19818e41a188ec22710bca8ecd, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/326d2a2f61cd4147ac1c3c3ab544f287, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/c72eb9a950de480baede797b31d4cf17] into tmpdir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp, totalSize=28.2 K
2024-11-20T20:37:44,509 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.Compactor(225): Compacting 48e28e19818e41a188ec22710bca8ecd, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732135058325
2024-11-20T20:37:44,510 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.Compactor(225): Compacting 326d2a2f61cd4147ac1c3c3ab544f287, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732135062400
2024-11-20T20:37:44,510 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.Compactor(225): Compacting c72eb9a950de480baede797b31d4cf17, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732135064454
2024-11-20T20:37:44,529 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2f07a01674f8c986e38cded8548d2fe#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:37:44,530 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/62d6856d57144aa6bbc8c48928b693e6 is 1080, key is row0002/info:/1732135058325/Put/seqid=0
2024-11-20T20:37:44,532 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,533 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:44,533 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741863_1046
2024-11-20T20:37:44,533 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:44,535 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,535 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:44,535 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741864_1047
2024-11-20T20:37:44,535 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,535 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:44,537 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,537 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:44,537 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741865_1048
2024-11-20T20:37:44,538 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:44,540 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:44,541 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:44,541 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741866_1049
2024-11-20T20:37:44,541 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:44,542 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:44,543 WARN [IPC Server handler 4 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:44,543 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:44,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741867_1050 (size=17994)
2024-11-20T20:37:44,720 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:37:44,720 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135060269 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs/d7d59561a64e%2C39199%2C1732135042427.1732135060269
2024-11-20T20:37:44,974 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/62d6856d57144aa6bbc8c48928b693e6 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6
2024-11-20T20:37:44,983 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2f07a01674f8c986e38cded8548d2fe/info of e2f07a01674f8c986e38cded8548d2fe into 62d6856d57144aa6bbc8c48928b693e6(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:37:44,983 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2f07a01674f8c986e38cded8548d2fe:
2024-11-20T20:37:44,983 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe., storeName=e2f07a01674f8c986e38cded8548d2fe/info, priority=13, startTime=1732135064506; duration=0sec
2024-11-20T20:37:44,983 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-20T20:37:44,983 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:44,983 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6 because midkey is the same as first or last row
2024-11-20T20:37:44,984 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-20T20:37:44,984 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:44,984 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6 because midkey is the same as first or last row
2024-11-20T20:37:44,984 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-20T20:37:44,984 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:44,984 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6 because midkey is the same as first or last row
2024-11-20T20:37:44,984 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:37:44,984 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2f07a01674f8c986e38cded8548d2fe:info
2024-11-20T20:37:45,036 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e427ce7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741847_1030 to 127.0.0.1:44305 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:45,036 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40859b5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741852_1035 to 127.0.0.1:38165 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39199 {}] regionserver.HRegion(8855): Flush requested on e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:45,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2f07a01674f8c986e38cded8548d2fe 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-20T20:37:45,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/46b438a775a54c8fb766ada2d58793d9 is 1079, key is tmprow/info:/1732135065876/Put/seqid=0
2024-11-20T20:37:45,885 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:45,886 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:45,886 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741868_1051
2024-11-20T20:37:45,887 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:45,888 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:45,889 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK], DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:45,889 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741869_1052
2024-11-20T20:37:45,889 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:45,891 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:45,891 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:45,891 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741870_1053
2024-11-20T20:37:45,892 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:45,895 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38165
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:45,895 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44972 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741871_1054 to mirror 127.0.0.1:38165
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:45,896 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:45,896 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741871_1054
2024-11-20T20:37:45,896 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44972 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-20T20:37:45,896 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:44972 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44972 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:45,901 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:45,902 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:45,902 WARN [IPC Server handler 4 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:45,902 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:45,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741872_1055 (size=6027)
2024-11-20T20:37:45,992 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:46,036 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40859b5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741862_1045 to 127.0.0.1:38165 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:46,036 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e427ce7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741842_1025 to 127.0.0.1:37843 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:46,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/46b438a775a54c8fb766ada2d58793d9
2024-11-20T20:37:46,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/46b438a775a54c8fb766ada2d58793d9 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/46b438a775a54c8fb766ada2d58793d9
2024-11-20T20:37:46,319 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:46,319 WARN [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]
2024-11-20T20:37:46,319 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C39199%2C1732135042427:(num 1732135064293) roll requested
2024-11-20T20:37:46,320 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39199%2C1732135042427.1732135066319
2024-11-20T20:37:46,323 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:46,323 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:46,323 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741873_1056
2024-11-20T20:37:46,324 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:46,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/46b438a775a54c8fb766ada2d58793d9, entries=1, sequenceid=45, filesize=5.9 K
2024-11-20T20:37:46,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e2f07a01674f8c986e38cded8548d2fe in 448ms, sequenceid=45, compaction requested=false
2024-11-20T20:37:46,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2f07a01674f8c986e38cded8548d2fe:
2024-11-20T20:37:46,326 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:46,326 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K
2024-11-20T20:37:46,326 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:46,326 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:46,327 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741874_1057
2024-11-20T20:37:46,327 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6 because midkey is the same as first or last row
2024-11-20T20:37:46,327 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:46,329 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:46,329 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:46,330 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741875_1058
2024-11-20T20:37:46,330 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:46,332 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:46,332 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:46,332 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741876_1059
2024-11-20T20:37:46,333 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:46,334 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:46,334 WARN [IPC Server handler 4 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:46,334 WARN [IPC Server handler 4 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:46,345 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:46,346 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:46,346 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:46,346 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:46,346 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:46,346 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135064293 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135066319
2024-11-20T20:37:46,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741857_1040 (size=13591)
2024-11-20T20:37:46,356 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38865:38865)]
2024-11-20T20:37:46,356 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:37:46,356 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135064293 is not closed yet, will try archiving it next time
2024-11-20T20:37:46,536 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:46,749 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:37:47,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39199 {}] regionserver.HRegion(8855): Flush requested on e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:37:47,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2f07a01674f8c986e38cded8548d2fe 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-20T20:37:47,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/0dcedfa0f07448048c92d662a8527194 is 1079, key is tmprow/info:/1732135067297/Put/seqid=0
2024-11-20T20:37:47,308 WARN [Thread-977 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44305
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
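The NameNode warning above names the exact loggers to raise for a full explanation of the placement failure. A minimal sketch of doing that at runtime with the Log4j 2 API (the backend this harness runs, per the configuration dump at the top of the log); the helper class is hypothetical and assumes log4j-core is the active logging implementation:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Hypothetical helper, not part of the test: raise the two loggers named in the
// "Failed to place enough replicas" warning so the next placement failure logs
// per-node reasons instead of the generic summary.
public final class BlockPlacementDebug {
    public static void main(String[] args) {
        Configurator.setLevel("org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}
```

The same effect can usually be had statically by adding the two logger entries to the harness's log4j2 properties.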
2024-11-20T20:37:47,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45010 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741878_1061 to mirror 127.0.0.1:44305
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:47,308 WARN [Thread-977 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:47,308 WARN [Thread-977 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741878_1061
2024-11-20T20:37:47,308 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45010 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-20T20:37:47,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45010 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45010 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:47,309 WARN [Thread-977 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:47,310 WARN [Thread-977 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:47,310 WARN [Thread-977 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:47,311 WARN [Thread-977 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741879_1062
2024-11-20T20:37:47,311 WARN [Thread-977 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:47,312 WARN [Thread-977 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:47,313 WARN [Thread-977 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:47,313 WARN [Thread-977 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741880_1063
2024-11-20T20:37:47,313 WARN [Thread-977 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:47,315 WARN [Thread-977 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:47,315 WARN [Thread-977 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
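Every block attempt above dies the same way: createBlockOutputStream fails against a dead datanode, the streamer marks that node bad, abandons the block, excludes the node, and asks the NameNode for a fresh pipeline. A hedged sketch of the stock HDFS client keys that shape this loop; the keys are standard hadoop-hdfs-client configuration, but the values shown are illustrative, not read from this run:

```java
import org.apache.hadoop.conf.Configuration;

public final class PipelineRecoverySketch {
    public static void main(String[] args) {
        // Assumed-standard HDFS client keys (values illustrative, not from this log).
        Configuration conf = new Configuration();
        // Whether to try replacing a failed datanode in an existing pipeline at all.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT replaces nodes only when the pipeline would shrink below a safe size;
        // NEVER lets a replication-2 write limp along on the one surviving datanode.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // best-effort keeps writing even when no replacement datanode can be found.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
    }
}
```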
2024-11-20T20:37:47,315 WARN [Thread-977 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741881_1064
2024-11-20T20:37:47,316 WARN [Thread-977 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:47,317 WARN [IPC Server handler 1 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:47,317 WARN [IPC Server handler 1 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:47,317 WARN [IPC Server handler 1 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:47,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741882_1065 (size=6027)
2024-11-20T20:37:47,723 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/0dcedfa0f07448048c92d662a8527194
2024-11-20T20:37:47,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/0dcedfa0f07448048c92d662a8527194 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/0dcedfa0f07448048c92d662a8527194
2024-11-20T20:37:47,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/0dcedfa0f07448048c92d662a8527194, entries=1, sequenceid=55, filesize=5.9 K
2024-11-20T20:37:47,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e2f07a01674f8c986e38cded8548d2fe in 441ms, sequenceid=55, compaction requested=true
2024-11-20T20:37:47,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2f07a01674f8c986e38cded8548d2fe:
2024-11-20T20:37:47,741 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K
2024-11-20T20:37:47,741 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:47,741 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6 because midkey is the same as first or last row
2024-11-20T20:37:47,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2f07a01674f8c986e38cded8548d2fe:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:37:47,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:37:47,741 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:37:47,742 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:37:47,742 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HStore(1541): e2f07a01674f8c986e38cded8548d2fe/info is initiating minor compaction (all files)
2024-11-20T20:37:47,742 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e2f07a01674f8c986e38cded8548d2fe/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
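The split-policy lines above compress to one comparison: sumSize=29.3 K exceeds sizeToCheck=16.0 K, so a split is desirable, yet StoreUtils vetoes it because the midkey equals the first or last row. A sketch of the threshold arithmetic as IncreasingToUpperBoundRegionSplitPolicy is commonly described; the cube formula is an assumption about this HBase version, not something the log states:

```java
public final class SplitThresholdSketch {
    // Assumed formula: sizeToCheck = min(desiredMaxFileSize, initialSize * n^3),
    // where n is the number of regions of the table on this regionserver.
    static long sizeToCheck(long desiredMaxFileSize, long initialSize, int regionsWithCommonTable) {
        long n = regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, initialSize * n * n * n);
    }

    public static void main(String[] args) {
        // With regionsWithCommonTable=1 (as logged), the threshold collapses to
        // initialSize, matching the logged sizeToCheck=16.0 K.
        long threshold = sizeToCheck(10L << 30, 16L << 10, 1);
        long sumSize = 30_003; // ~29.3 K of store files, as logged
        System.out.println("should split: " + (sumSize > threshold)); // true, matching the log
    }
}
```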
2024-11-20T20:37:47,743 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/46b438a775a54c8fb766ada2d58793d9, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/0dcedfa0f07448048c92d662a8527194] into tmpdir=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp, totalSize=29.3 K
2024-11-20T20:37:47,743 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62d6856d57144aa6bbc8c48928b693e6, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732135058325
2024-11-20T20:37:47,744 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.Compactor(225): Compacting 46b438a775a54c8fb766ada2d58793d9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732135065876
2024-11-20T20:37:47,744 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0dcedfa0f07448048c92d662a8527194, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732135067297
2024-11-20T20:37:47,765 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2f07a01674f8c986e38cded8548d2fe#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:37:47,766 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/21b78e21c377443f864fc8a7558aaf06 is 1080, key is row0002/info:/1732135058325/Put/seqid=0
2024-11-20T20:37:47,768 WARN [Thread-982 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:47,768 WARN [Thread-982 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]) is bad.
2024-11-20T20:37:47,768 WARN [Thread-982 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741883_1066
2024-11-20T20:37:47,769 WARN [Thread-982 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40343,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]
2024-11-20T20:37:47,770 WARN [Thread-982 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:47,771 WARN [Thread-982 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:37:47,771 WARN [Thread-982 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741884_1067
2024-11-20T20:37:47,771 WARN [Thread-982 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:37:47,775 WARN [Thread-982 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37843
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:47,775 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45028 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741885_1068 to mirror 127.0.0.1:37843
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:47,775 WARN [Thread-982 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:47,775 WARN [Thread-982 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741885_1068
2024-11-20T20:37:47,775 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45028 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-20T20:37:47,775 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45028 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45028 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:47,776 WARN [Thread-982 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:47,778 WARN [Thread-982 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44305
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:47,778 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45044 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8]'}, localName='127.0.0.1:36941', datanodeUuid='2b5dcdae-15e4-4944-a72e-22c5ea8ad074', xmitsInProgress=0}:Exception transferring block BP-1565890324-172.17.0.2-1732135040132:blk_1073741886_1069 to mirror 127.0.0.1:44305
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:47,778 WARN [Thread-982 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK], DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]) is bad.
2024-11-20T20:37:47,778 WARN [Thread-982 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741886_1069
2024-11-20T20:37:47,778 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45044 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-20T20:37:47,779 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_620819321_22 at /127.0.0.1:45044 [Receiving block BP-1565890324-172.17.0.2-1732135040132:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:36941:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45044 dst: /127.0.0.1:36941
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:47,779 WARN [Thread-982 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]
2024-11-20T20:37:47,780 WARN [IPC Server handler 1 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-20T20:37:47,780 WARN [IPC Server handler 1 on default port 44749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-20T20:37:47,780 WARN [IPC Server handler 1 on default port 44749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-20T20:37:47,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741887_1070 (size=18097)
2024-11-20T20:37:47,993 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:48,036 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e427ce7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741872_1055 to 127.0.0.1:38165 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:48,036 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40859b5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741867_1050 to 127.0.0.1:38165 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:48,192 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/21b78e21c377443f864fc8a7558aaf06 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/21b78e21c377443f864fc8a7558aaf06
2024-11-20T20:37:48,200 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e2f07a01674f8c986e38cded8548d2fe/info of e2f07a01674f8c986e38cded8548d2fe into 21b78e21c377443f864fc8a7558aaf06(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:37:48,200 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e2f07a01674f8c986e38cded8548d2fe:
2024-11-20T20:37:48,200 INFO [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe., storeName=e2f07a01674f8c986e38cded8548d2fe/info, priority=13, startTime=1732135067741; duration=0sec
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/21b78e21c377443f864fc8a7558aaf06 because midkey is the same as first or last row
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/21b78e21c377443f864fc8a7558aaf06 because midkey is the same as first or last row
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/21b78e21c377443f864fc8a7558aaf06 because midkey is the same as first or last row
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:37:48,201 DEBUG [RS:0;d7d59561a64e:39199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2f07a01674f8c986e38cded8548d2fe:info
2024-11-20T20:37:48,357 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:48,357 WARN [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas.
2024-11-20T20:37:48,526 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:37:48,532 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:37:48,536 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
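The "Too many consecutive RollWriter requests" warning above is the WAL's circuit breaker: with every datanode of the old pipeline dead, each roll produces another under-replicated writer, and after a bounded number of consecutive rolls FSHLog stops asking. A sketch of the two HBase keys assumed to drive that bound; both names are standard HBase configuration, but the values shown are the usual defaults rather than anything this test prints:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalRollLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // How many consecutive low-replication-triggered rolls FSHLog tolerates
        // before logging the warning above and keeping the current writer.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        // Replication count below which a WAL pipeline counts as "low".
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
    }
}
```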
2024-11-20T20:37:48,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:37:48,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:37:48,552 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:37:48,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67b1d1e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:37:48,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44bf135d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:37:48,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d144912{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/java.io.tmpdir/jetty-localhost-34913-hadoop-hdfs-3_4_1-tests_jar-_-any-6645829403194187976/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:37:48,670 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4982983{HTTP/1.1, (http/1.1)}{localhost:34913}
2024-11-20T20:37:48,670 INFO [Time-limited test {}] server.Server(415): Started @142376ms
2024-11-20T20:37:48,671 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:37:49,037 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40859b5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741882_1065 to 127.0.0.1:37843 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
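The jetty startup lines above are a datanode HTTP server coming back: the test has revived one of the datanodes whose death caused the "Connection refused" storm. A sketch of the MiniDFSCluster calls such a sequence typically uses; the helper is hypothetical and TestLogRolling's actual driver code may differ:

```java
import org.apache.hadoop.hdfs.MiniDFSCluster;

final class DatanodeBounceSketch {
    // Hypothetical helper: stop one datanode, then bring it back later.
    static void bounceFirstDatanode(MiniDFSCluster cluster) throws Exception {
        // Stopping the node yields the pipeline failures and "Connection refused"
        // traces seen throughout this log.
        MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
        // WAL writes now limp along on the surviving datanodes until the roller
        // gives up; restarting brings the node (and its jetty web UI) back.
        cluster.restartDataNode(dn, true); // true: try to keep the same port
    }
}
```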
2024-11-20T20:37:49,037 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e427ce7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741857_1040 to 127.0.0.1:40343 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:49,147 WARN [Thread-1002 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:37:49,153 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba73245e90cca3d9 with lease ID 0xfb1b17cde5bf637c: from storage DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae node DatanodeRegistration(127.0.0.1:45275, datanodeUuid=2f069166-49cc-4b43-8675-73d4df62dda0, infoPort=38649, infoSecurePort=0, ipcPort=40671, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 7, hasStaleStorage: false, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-20T20:37:49,153 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba73245e90cca3d9 with lease ID 0xfb1b17cde5bf637c: from storage DS-d9b9bbc2-356b-4db3-951b-ed9cef04c39e node DatanodeRegistration(127.0.0.1:45275, datanodeUuid=2f069166-49cc-4b43-8675-73d4df62dda0, infoPort=38649, infoSecurePort=0, ipcPort=40671, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:37:49,993 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:50,357 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:50,536 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:51,036 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e427ce7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36941, datanodeUuid=2b5dcdae-15e4-4944-a72e-22c5ea8ad074, infoPort=38865, infoSecurePort=0, ipcPort=45429, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741887_1070 to 127.0.0.1:38165 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:37:51,993 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:52,229 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-20T20:37:52,357 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:52,537 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:52,949 ERROR [FSHLog-0-hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData-prefix:d7d59561a64e,38137,1732135042252 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:52,950 WARN [FSHLog-0-hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData-prefix:d7d59561a64e,38137,1732135042252 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:52,950 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C38137%2C1732135042252:(num 1732135042557) roll requested
2024-11-20T20:37:52,950 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C38137%2C1732135042252.1732135072950
2024-11-20T20:37:52,953 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:52,954 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK], DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]) is bad.
2024-11-20T20:37:52,954 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741888_1071
2024-11-20T20:37:52,955 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37843,DS-e964c223-bc83-430b-8acf-39e68464d158,DISK]
2024-11-20T20:37:52,960 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:52,960 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:52,960 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:52,960 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:52,960 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:37:52,961 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135072950
2024-11-20T20:37:52,961 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:52,962 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:52,962 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:37:52,962 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38865:38865),(127.0.0.1/127.0.0.1:38649:38649)]
2024-11-20T20:37:52,962 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 is not closed yet, will try archiving it next time
2024-11-20T20:37:52,962 WARN [IPC Server handler 1 on default port 44749 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1014
2024-11-20T20:37:52,962 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 after 0ms
2024-11-20T20:37:53,994 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:54,358 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:55,994 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:56,358 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:56,964 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 after 4002ms
2024-11-20T20:37:57,995 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:58,359 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:37:59,995 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:00,359 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:01,995 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
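The RecoverLeaseFSUtils lines above (attempt=0 after 0ms, attempt=1 after 4002ms) show the WAL-close path polling HDFS lease recovery, pausing a few seconds between attempts until the NameNode reports the old WAL file closed. A minimal sketch of that loop, assuming a simplified stand-in (LeaseRecoveryLoop, a fixed 4-second pause) rather than the actual RecoverLeaseFSUtils code:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryLoop {
    // Poll recoverLease until HDFS has closed the file or we give up.
    static boolean recover(DistributedFileSystem dfs, Path wal, int maxAttempts)
            throws Exception {
        long start = System.currentTimeMillis();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            if (dfs.recoverLease(wal)) {   // true once the file is closed
                return true;
            }
            System.out.printf("Failed to recover lease, attempt=%d on file=%s after %dms%n",
                    attempt, wal, System.currentTimeMillis() - start);
            Thread.sleep(4000L);           // pause before asking the NameNode again
        }
        return false;
    }
}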
2024-11-20T20:38:02,124 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39199%2C1732135042427.1732135082124
2024-11-20T20:38:02,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,134 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,134 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,135 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135066319 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135082124
2024-11-20T20:38:02,137 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38649:38649),(127.0.0.1/127.0.0.1:38865:38865)]
2024-11-20T20:38:02,137 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:38:02,137 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135066319 is not closed yet, will try archiving it next time
2024-11-20T20:38:02,137 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135064293 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs/d7d59561a64e%2C39199%2C1732135042427.1732135064293
2024-11-20T20:38:02,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741877_1060 (size=12911)
2024-11-20T20:38:02,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39199 {}] regionserver.HRegion(8855): Flush requested on e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:38:02,142 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e2f07a01674f8c986e38cded8548d2fe 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-20T20:38:02,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/28960ef70afa4131bb473ad64b16f195 is 1080, key is row0013/info:/1732135082139/Put/seqid=0
2024-11-20T20:38:02,151 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,151 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:38:02,151 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741891_1075
2024-11-20T20:38:02,152 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:38:02,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741892_1076 (size=8190)
2024-11-20T20:38:02,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741892_1076 (size=8190)
2024-11-20T20:38:02,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/28960ef70afa4131bb473ad64b16f195
2024-11-20T20:38:02,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/28960ef70afa4131bb473ad64b16f195 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/28960ef70afa4131bb473ad64b16f195
2024-11-20T20:38:02,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/28960ef70afa4131bb473ad64b16f195, entries=3, sequenceid=66, filesize=8.0 K
2024-11-20T20:38:02,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for e2f07a01674f8c986e38cded8548d2fe in 49ms, sequenceid=66, compaction requested=false
2024-11-20T20:38:02,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e2f07a01674f8c986e38cded8548d2fe:
2024-11-20T20:38:02,192 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K
2024-11-20T20:38:02,192 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:38:02,192 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/21b78e21c377443f864fc8a7558aaf06 because midkey is the same as first or last row
2024-11-20T20:38:02,360 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,360 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled.
2024-11-20T20:38:02,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-20T20:38:02,366 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-20T20:38:02,366 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:38:02,366 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:38:02,366 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:38:02,367 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-20T20:38:02,367 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1914738269, stopped=false
2024-11-20T20:38:02,367 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d7d59561a64e,38137,1732135042252
2024-11-20T20:38:02,367 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
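The call stack above is the JUnit teardown path: AbstractTestLogRolling.tearDown drives HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the master and region servers. A minimal sketch of that shape, assuming an illustrative test class (LogRollingTeardownSketch) rather than the real AbstractTestLogRolling source:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTeardownSketch {
    private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

    @After
    public void tearDown() throws Exception {
        // Closes the shared cluster connection, stops the HBase minicluster,
        // then tears down the backing HDFS/ZooKeeper miniclusters.
        testUtil.shutdownMiniCluster();
    }
}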
2024-11-20T20:38:02,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:38:02,437 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:38:02,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:38:02,437 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:02,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:02,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:02,437 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-20T20:38:02,438 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-20T20:38:02,438 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:38:02,438 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:38:02,438 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:38:02,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:38:02,438 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd7d59561a64e,39199,1732135042427' *****
2024-11-20T20:38:02,439 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-20T20:38:02,439 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd7d59561a64e,35431,1732135043885' *****
2024-11-20T20:38:02,439 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-20T20:38:02,439 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-20T20:38:02,439 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-20T20:38:02,439 INFO [RS:0;d7d59561a64e:39199 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-20T20:38:02,439 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-20T20:38:02,439 INFO [RS:0;d7d59561a64e:39199 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-20T20:38:02,439 INFO [RS:1;d7d59561a64e:35431 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-20T20:38:02,439 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(3091): Received CLOSE for e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:38:02,439 INFO [RS:1;d7d59561a64e:35431 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-20T20:38:02,439 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(959): stopping server d7d59561a64e,35431,1732135043885
2024-11-20T20:38:02,439 INFO [RS:1;d7d59561a64e:35431 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-20T20:38:02,440 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:38:02,440 INFO [RS:1;d7d59561a64e:35431 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;d7d59561a64e:35431.
2024-11-20T20:38:02,440 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-20T20:38:02,440 DEBUG [RS:1;d7d59561a64e:35431 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:38:02,440 DEBUG [RS:1;d7d59561a64e:35431 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:38:02,440 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(959): stopping server d7d59561a64e,39199,1732135042427
2024-11-20T20:38:02,440 INFO [RS:0;d7d59561a64e:39199 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-20T20:38:02,440 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(976): stopping server d7d59561a64e,35431,1732135043885; all regions closed.
2024-11-20T20:38:02,440 INFO [RS:0;d7d59561a64e:39199 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d7d59561a64e:39199.
2024-11-20T20:38:02,440 DEBUG [RS:0;d7d59561a64e:39199 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:38:02,440 DEBUG [RS:0;d7d59561a64e:39199 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:38:02,441 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-20T20:38:02,441 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-20T20:38:02,441 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-20T20:38:02,441 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,441 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-20T20:38:02,441 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,441 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,441 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,441 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,441 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e2f07a01674f8c986e38cded8548d2fe, disabling compactions & flushes
2024-11-20T20:38:02,442 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:38:02,442 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-20T20:38:02,442 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:38:02,442 DEBUG [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1325): Online Regions={e2f07a01674f8c986e38cded8548d2fe=TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe., 1588230740=hbase:meta,,1.1588230740}
2024-11-20T20:38:02,442 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe. after waiting 0 ms
2024-11-20T20:38:02,442 DEBUG [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e2f07a01674f8c986e38cded8548d2fe
2024-11-20T20:38:02,442 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:38:02,442 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e2f07a01674f8c986e38cded8548d2fe 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-11-20T20:38:02,442 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,442 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-20T20:38:02,442 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-20T20:38:02,442 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-20T20:38:02,442 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,443 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-20T20:38:02,443 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:38:02,443 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-20T20:38:02,443 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB
2024-11-20T20:38:02,443 WARN [IPC Server handler 2 on default port 44749 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 has not been closed. Lease recovery is in progress. RecoveryId = 1077 for block blk_1073741837_1017
2024-11-20T20:38:02,444 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 after 1ms
2024-11-20T20:38:02,444 ERROR [FSHLog-0-hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5-prefix:d7d59561a64e,39199,1732135042427.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,444 WARN [FSHLog-0-hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5-prefix:d7d59561a64e,39199,1732135042427.meta {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,444 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C39199%2C1732135042427.meta:.meta(num 1732135043724) roll requested
2024-11-20T20:38:02,444 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39199%2C1732135042427.meta.1732135082444.meta
2024-11-20T20:38:02,448 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/ca76a459e3db4b49b53d79c707e05f09 is 1080, key is row0015/info:/1732135082144/Put/seqid=0
2024-11-20T20:38:02,450 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,450 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:45275,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:38:02,450 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741894_1079
2024-11-20T20:38:02,451 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:38:02,455 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,455 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,455 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,455 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,455 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741895_1080 (size=14660)
2024-11-20T20:38:02,455 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135082444.meta
2024-11-20T20:38:02,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741895_1080 (size=14660)
2024-11-20T20:38:02,456 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,456 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44305,DS-7f8e4e7e-dca6-47bc-881e-2df61cbcc60e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
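The Thread-1042 sequence above (createBlockOutputStream fails, the block is abandoned, the dead datanode is excluded, and the write retries on a smaller pipeline) is the HDFS client's standard pipeline recovery; when every node in the pipeline is gone it surfaces as the "All datanodes ... are bad" IOException seen throughout this log. How aggressively the client replaces failed datanodes is governed by standard HDFS client keys; a sketch of setting them on a test Configuration (the key strings are real HDFS client settings, but whether this particular test tunes them is not shown in this log):

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettings {
    // Client-side knobs for what DataStreamer does when a pipeline datanode
    // dies: whether to ask the NameNode for a replacement node, and whether
    // to carry on with fewer replicas when no replacement is available.
    static Configuration relaxedPipelinePolicy() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // On tiny clusters (2-3 datanodes) there is rarely a spare node to swap in.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
    }
}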
2024-11-20T20:38:02,456 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:38:02,456 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/ca76a459e3db4b49b53d79c707e05f09
2024-11-20T20:38:02,457 WARN [IPC Server handler 0 on default port 44749 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741834_1016
2024-11-20T20:38:02,457 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta after 1ms
2024-11-20T20:38:02,457 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38865:38865),(127.0.0.1/127.0.0.1:38649:38649)]
2024-11-20T20:38:02,458 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta is not closed yet, will try archiving it next time
2024-11-20T20:38:02,464 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/.tmp/info/ca76a459e3db4b49b53d79c707e05f09 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/ca76a459e3db4b49b53d79c707e05f09
2024-11-20T20:38:02,470 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/ca76a459e3db4b49b53d79c707e05f09, entries=9, sequenceid=78, filesize=14.3 K
2024-11-20T20:38:02,471 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for e2f07a01674f8c986e38cded8548d2fe in 29ms, sequenceid=78, compaction requested=true
2024-11-20T20:38:02,472 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/48e28e19818e41a188ec22710bca8ecd, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/326d2a2f61cd4147ac1c3c3ab544f287, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/c72eb9a950de480baede797b31d4cf17, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/46b438a775a54c8fb766ada2d58793d9, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/0dcedfa0f07448048c92d662a8527194] to archive
2024-11-20T20:38:02,474 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-20T20:38:02,475 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/info/546b1240f27c435bb1225b83ecd25a62 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe./info:regioninfo/1732135044419/Put/seqid=0
2024-11-20T20:38:02,476 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/48e28e19818e41a188ec22710bca8ecd to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/48e28e19818e41a188ec22710bca8ecd
2024-11-20T20:38:02,477 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/326d2a2f61cd4147ac1c3c3ab544f287 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/326d2a2f61cd4147ac1c3c3ab544f287
2024-11-20T20:38:02,479 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/62d6856d57144aa6bbc8c48928b693e6
2024-11-20T20:38:02,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741896_1082 (size=7089)
2024-11-20T20:38:02,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741896_1082 (size=7089)
2024-11-20T20:38:02,483 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/info/546b1240f27c435bb1225b83ecd25a62
2024-11-20T20:38:02,483 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/c72eb9a950de480baede797b31d4cf17 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/c72eb9a950de480baede797b31d4cf17
2024-11-20T20:38:02,485 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/46b438a775a54c8fb766ada2d58793d9 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/46b438a775a54c8fb766ada2d58793d9
2024-11-20T20:38:02,486 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/0dcedfa0f07448048c92d662a8527194 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/info/0dcedfa0f07448048c92d662a8527194
2024-11-20T20:38:02,487 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d7d59561a64e:38137 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-11-20T20:38:02,487 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [48e28e19818e41a188ec22710bca8ecd=10347, 326d2a2f61cd4147ac1c3c3ab544f287=12506, 62d6856d57144aa6bbc8c48928b693e6=17994, c72eb9a950de480baede797b31d4cf17=6027, 46b438a775a54c8fb766ada2d58793d9=6027, 0dcedfa0f07448048c92d662a8527194=6027]
2024-11-20T20:38:02,491 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e2f07a01674f8c986e38cded8548d2fe/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1
2024-11-20T20:38:02,492 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:38:02,492 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e2f07a01674f8c986e38cded8548d2fe: Waiting for close lock at 1732135082441Running coprocessor pre-close hooks at 1732135082441Disabling compacts and flushes for region at 1732135082441Disabling writes for close at 1732135082442 (+1 ms)Obtaining lock to block concurrent updates at 1732135082442Preparing flush snapshotting stores in e2f07a01674f8c986e38cded8548d2fe at 1732135082442Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732135082443 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe. at 1732135082444 (+1 ms)Flushing e2f07a01674f8c986e38cded8548d2fe/info: creating writer at 1732135082444Flushing e2f07a01674f8c986e38cded8548d2fe/info: appending metadata at 1732135082448 (+4 ms)Flushing e2f07a01674f8c986e38cded8548d2fe/info: closing flushed file at 1732135082448Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a8f0791: reopening flushed file at 1732135082463 (+15 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for e2f07a01674f8c986e38cded8548d2fe in 29ms, sequenceid=78, compaction requested=true at 1732135082471 (+8 ms)Writing region close event to WAL at 1732135082488 (+17 ms)Running coprocessor post-close hooks at 1732135082492 (+4 ms)Closed at 1732135082492
2024-11-20T20:38:02,492 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732135044031.e2f07a01674f8c986e38cded8548d2fe.
2024-11-20T20:38:02,508 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/ns/b3b67644978e468bab8f3b09ff2da625 is 43, key is default/ns:d/1732135043810/Put/seqid=0
2024-11-20T20:38:02,510 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,510 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:36941,DS-980f86ee-8f41-4692-9903-d9b67a1c440f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:38:02,510 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741897_1083
2024-11-20T20:38:02,511 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:38:02,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741898_1084 (size=5153)
2024-11-20T20:38:02,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741898_1084 (size=5153)
2024-11-20T20:38:02,523 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/ns/b3b67644978e468bab8f3b09ff2da625
2024-11-20T20:38:02,538 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 is not closed yet, will try archiving it next time
2024-11-20T20:38:02,538 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135066319 to hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs/d7d59561a64e%2C39199%2C1732135042427.1732135066319
2024-11-20T20:38:02,546 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/table/12387327d24f480eac8eec48e63b0411 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732135044430/Put/seqid=0
2024-11-20T20:38:02,548 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:02,548 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1565890324-172.17.0.2-1732135040132:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK], DatanodeInfoWithStorage[127.0.0.1:45275,DS-a71ff92b-2412-4d27-b5a5-26cb58ab2dae,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]) is bad.
2024-11-20T20:38:02,548 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-1565890324-172.17.0.2-1732135040132:blk_1073741899_1085
2024-11-20T20:38:02,548 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38165,DS-5161bad2-271f-4a98-bba4-e91bf5f26eb7,DISK]
2024-11-20T20:38:02,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741900_1086 (size=5424)
2024-11-20T20:38:02,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741900_1086 (size=5424)
2024-11-20T20:38:02,555 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/table/12387327d24f480eac8eec48e63b0411
2024-11-20T20:38:02,562 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/info/546b1240f27c435bb1225b83ecd25a62 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/info/546b1240f27c435bb1225b83ecd25a62
2024-11-20T20:38:02,569 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/info/546b1240f27c435bb1225b83ecd25a62, entries=10, sequenceid=11, filesize=6.9 K
2024-11-20T20:38:02,570 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/ns/b3b67644978e468bab8f3b09ff2da625 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/ns/b3b67644978e468bab8f3b09ff2da625
2024-11-20T20:38:02,577 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/ns/b3b67644978e468bab8f3b09ff2da625, entries=2, sequenceid=11, filesize=5.0 K
2024-11-20T20:38:02,578 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/.tmp/table/12387327d24f480eac8eec48e63b0411 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/table/12387327d24f480eac8eec48e63b0411
2024-11-20T20:38:02,585 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/table/12387327d24f480eac8eec48e63b0411, entries=2, sequenceid=11, filesize=5.3 K
2024-11-20T20:38:02,587 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false
2024-11-20T20:38:02,592 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-20T20:38:02,593 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:38:02,593 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-20T20:38:02,593 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135082442Running coprocessor pre-close hooks at 1732135082442Disabling compacts and flushes for region at 1732135082442Disabling writes for close at 1732135082443 (+1 ms)Obtaining lock to block concurrent updates at 1732135082443Preparing flush snapshotting stores in 1588230740 at 1732135082443Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732135082444 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732135082458 (+14 ms)Flushing 1588230740/info: creating writer at 1732135082458Flushing 1588230740/info: appending metadata at 1732135082474 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732135082475 (+1 ms)Flushing 1588230740/ns: creating writer at 1732135082489 (+14 ms)Flushing 1588230740/ns: appending metadata at 1732135082508 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732135082508Flushing 1588230740/table: creating writer at 1732135082530 (+22 ms)Flushing 1588230740/table: appending metadata at 1732135082545 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732135082545Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f13499f: reopening flushed file at 1732135082561 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cd00120: reopening flushed file at 1732135082569 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a897c73: reopening flushed file at 1732135082577 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false at 1732135082587 (+10 ms)Writing region close event to WAL at 1732135082588 (+1 ms)Running coprocessor post-close hooks at 1732135082593 (+5 ms)Closed at 1732135082593
2024-11-20T20:38:02,593 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-20T20:38:02,642 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(976): stopping server d7d59561a64e,39199,1732135042427; all regions closed.
2024-11-20T20:38:02,643 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,643 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,643 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,643 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,643 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:02,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741893_1078 (size=825)
2024-11-20T20:38:02,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741893_1078 (size=825)
2024-11-20T20:38:03,059 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-20T20:38:03,059 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-20T20:38:03,061 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-20T20:38:03,061 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-20T20:38:03,062 INFO [regionserver/d7d59561a64e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-20T20:38:03,155 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7eb5a740[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45275, datanodeUuid=2f069166-49cc-4b43-8675-73d4df62dda0, infoPort=38649, infoSecurePort=0, ipcPort=40671, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741836_1012 to 127.0.0.1:38165 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:03,155 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@707246e6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45275, datanodeUuid=2f069166-49cc-4b43-8675-73d4df62dda0, infoPort=38649, infoSecurePort=0, ipcPort=40671, storageInfo=lv=-57;cid=testClusterID;nsid=5296148;c=1732135040132):Failed to transfer BP-1565890324-172.17.0.2-1732135040132:blk_1073741832_1008 to 127.0.0.1:38165 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:03,990 INFO [regionserver/d7d59561a64e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-20T20:38:04,180 INFO [master/d7d59561a64e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-20T20:38:04,181 INFO [master/d7d59561a64e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-20T20:38:04,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741828_1004 (size=1189) 2024-11-20T20:38:04,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741826_1002 (size=42) 2024-11-20T20:38:06,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741877_1060 (size=12911) 2024-11-20T20:38:06,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741827_1003 (size=196) 2024-11-20T20:38:06,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741829_1005 (size=34) 2024-11-20T20:38:06,445 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 after 4001ms 2024-11-20T20:38:06,458 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta after 4002ms 2024-11-20T20:38:06,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T20:38:06,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T20:38:06,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T20:38:07,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741825_1001 (size=7) 2024-11-20T20:38:07,442 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-20T20:38:07,446 DEBUG [RS:1;d7d59561a64e:35431 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs 2024-11-20T20:38:07,446 INFO [RS:1;d7d59561a64e:35431 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C35431%2C1732135043885:(num 1732135044133) 2024-11-20T20:38:07,446 DEBUG [RS:1;d7d59561a64e:35431 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:38:07,446 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:38:07,446 INFO [RS:1;d7d59561a64e:35431 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:38:07,447 INFO [RS:1;d7d59561a64e:35431 {}] hbase.ChoreService(370): Chore service for: regionserver/d7d59561a64e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, 
period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T20:38:07,447 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T20:38:07,447 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T20:38:07,447 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T20:38:07,447 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T20:38:07,447 INFO [RS:1;d7d59561a64e:35431 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:38:07,447 INFO [RS:1;d7d59561a64e:35431 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35431 2024-11-20T20:38:07,493 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:07,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:07,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:07,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:07,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:07,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:07,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d7d59561a64e,35431,1732135043885 2024-11-20T20:38:07,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:38:07,521 INFO [RS:1;d7d59561a64e:35431 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:38:07,526 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:07,526 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:07,531 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d7d59561a64e,35431,1732135043885] 2024-11-20T20:38:07,541 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d7d59561a64e,35431,1732135043885 already deleted, retry=false 2024-11-20T20:38:07,541 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d7d59561a64e,35431,1732135043885 expired; onlineServers=1 2024-11-20T20:38:07,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:38:07,631 INFO [RS:1;d7d59561a64e:35431 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:38:07,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35431-0x10159f738630002, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:38:07,631 INFO [RS:1;d7d59561a64e:35431 {}] regionserver.HRegionServer(1031): Exiting; stopping=d7d59561a64e,35431,1732135043885; zookeeper connection closed. 2024-11-20T20:38:07,632 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@58e63d5e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@58e63d5e 2024-11-20T20:38:07,644 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-20T20:38:07,649 DEBUG [RS:0;d7d59561a64e:39199 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs 2024-11-20T20:38:07,649 INFO [RS:0;d7d59561a64e:39199 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C39199%2C1732135042427.meta:.meta(num 1732135082444) 2024-11-20T20:38:07,649 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:07,650 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:07,650 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:07,650 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:07,650 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:07,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741890_1074 (size=14682) 2024-11-20T20:38:07,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741890_1074 (size=14682) 2024-11-20T20:38:08,028 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T20:38:08,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:08,056 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:08,056 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:08,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:08,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:08,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:08,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:08,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:08,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:08,458 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-20T20:38:08,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:09,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:09,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:10,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:10,458 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.nio.channels.ClosedChannelException: null
    at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?]
2024-11-20T20:38:10,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:11,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:11,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:12,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741831_1007 (size=1321)
2024-11-20T20:38:12,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741835_1011 (size=393)
2024-11-20T20:38:12,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:12,459 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.FSHLog(580):
java.nio.channels.ClosedChannelException: null
    at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?]
2024-11-20T20:38:12,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:12,651 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-11-20T20:38:12,655 DEBUG [RS:0;d7d59561a64e:39199 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/oldWALs
2024-11-20T20:38:12,655 INFO [RS:0;d7d59561a64e:39199 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C39199%2C1732135042427:(num 1732135082124)
2024-11-20T20:38:12,655 DEBUG [RS:0;d7d59561a64e:39199 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:38:12,655 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.LeaseManager(133): Closed leases
2024-11-20T20:38:12,655 INFO [RS:0;d7d59561a64e:39199 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-20T20:38:12,655 INFO [RS:0;d7d59561a64e:39199 {}] hbase.ChoreService(370): Chore service for: regionserver/d7d59561a64e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-20T20:38:12,656 INFO [RS:0;d7d59561a64e:39199 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-20T20:38:12,656 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-20T20:38:12,656 INFO [RS:0;d7d59561a64e:39199 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39199
2024-11-20T20:38:12,720 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d7d59561a64e,39199,1732135042427
2024-11-20T20:38:12,720 INFO [RS:0;d7d59561a64e:39199 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-20T20:38:12,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T20:38:12,731 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d7d59561a64e,39199,1732135042427]
2024-11-20T20:38:12,741 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d7d59561a64e,39199,1732135042427 already deleted, retry=false
2024-11-20T20:38:12,741 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d7d59561a64e,39199,1732135042427 expired; onlineServers=0
2024-11-20T20:38:12,741 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd7d59561a64e,38137,1732135042252' *****
2024-11-20T20:38:12,742 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-20T20:38:12,742 INFO [M:0;d7d59561a64e:38137 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-20T20:38:12,742 INFO [M:0;d7d59561a64e:38137 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-20T20:38:12,742 DEBUG [M:0;d7d59561a64e:38137 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-20T20:38:12,742 DEBUG [M:0;d7d59561a64e:38137 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-20T20:38:12,742 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-20T20:38:12,742 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135042951 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135042951,5,FailOnTimeoutGroup]
2024-11-20T20:38:12,742 INFO [M:0;d7d59561a64e:38137 {}] hbase.ChoreService(370): Chore service for: master/d7d59561a64e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-20T20:38:12,742 INFO [M:0;d7d59561a64e:38137 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-20T20:38:12,742 DEBUG [M:0;d7d59561a64e:38137 {}] master.HMaster(1795): Stopping service threads
2024-11-20T20:38:12,742 INFO [M:0;d7d59561a64e:38137 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-20T20:38:12,742 INFO [M:0;d7d59561a64e:38137 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-20T20:38:12,743 INFO [M:0;d7d59561a64e:38137 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-20T20:38:12,743 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-20T20:38:12,743 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135042951 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135042951,5,FailOnTimeoutGroup]
2024-11-20T20:38:12,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-20T20:38:12,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:12,763 INFO [M:0;d7d59561a64e:38137 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/.lastflushedseqids
2024-11-20T20:38:12,765 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false
2024-11-20T20:38:12,765 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-11-20T20:38:12,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741901_1087 (size=130)
2024-11-20T20:38:12,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741901_1087 (size=130)
2024-11-20T20:38:12,775 INFO [M:0;d7d59561a64e:38137 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-20T20:38:12,775 INFO [M:0;d7d59561a64e:38137 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-20T20:38:12,776 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T20:38:12,776 INFO [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:12,776 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:12,776 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T20:38:12,776 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:12,776 INFO [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-20T20:38:12,814 DEBUG [M:0;d7d59561a64e:38137 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a295f6a7b7a045cbb736462b2e0791a9 is 82, key is hbase:meta,,1/info:regioninfo/1732135043756/Put/seqid=0 2024-11-20T20:38:12,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741902_1088 (size=5672) 2024-11-20T20:38:12,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741902_1088 (size=5672) 2024-11-20T20:38:12,829 INFO [M:0;d7d59561a64e:38137 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a295f6a7b7a045cbb736462b2e0791a9 2024-11-20T20:38:12,831 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:38:12,832 INFO [RS:0;d7d59561a64e:39199 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:38:12,832 INFO [RS:0;d7d59561a64e:39199 {}] regionserver.HRegionServer(1031): Exiting; stopping=d7d59561a64e,39199,1732135042427; zookeeper connection closed. 2024-11-20T20:38:12,832 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39199-0x10159f738630001, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:38:12,832 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@164354c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@164354c 2024-11-20T20:38:12,832 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-20T20:38:12,853 DEBUG [M:0;d7d59561a64e:38137 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/401aac2d964a43f5a34721f349bb9990 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732135044435/Put/seqid=0 2024-11-20T20:38:12,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741903_1089 (size=6256) 2024-11-20T20:38:12,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741903_1089 (size=6256) 2024-11-20T20:38:12,863 INFO [M:0;d7d59561a64e:38137 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/401aac2d964a43f5a34721f349bb9990 2024-11-20T20:38:12,870 INFO [M:0;d7d59561a64e:38137 {}] regionserver.StoreFileReader(518): 
2024-11-20T20:38:12,885 DEBUG [M:0;d7d59561a64e:38137 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1fd60de5610643e8a28603b9b4a87867 is 69, key is d7d59561a64e,35431,1732135043885/rs:state/1732135043966/Put/seqid=0
2024-11-20T20:38:12,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741904_1090 (size=5224)
2024-11-20T20:38:12,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741904_1090 (size=5224)
2024-11-20T20:38:12,891 INFO [M:0;d7d59561a64e:38137 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1fd60de5610643e8a28603b9b4a87867
2024-11-20T20:38:12,917 DEBUG [M:0;d7d59561a64e:38137 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ce954dfcadc405cb5ce9447a6aaa450 is 52, key is load_balancer_on/state:d/1732135043864/Put/seqid=0
2024-11-20T20:38:12,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741905_1091 (size=5056)
2024-11-20T20:38:12,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741905_1091 (size=5056)
2024-11-20T20:38:13,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:13,326 INFO [M:0;d7d59561a64e:38137 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ce954dfcadc405cb5ce9447a6aaa450
2024-11-20T20:38:13,337 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a295f6a7b7a045cbb736462b2e0791a9 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a295f6a7b7a045cbb736462b2e0791a9
2024-11-20T20:38:13,343 INFO [M:0;d7d59561a64e:38137 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a295f6a7b7a045cbb736462b2e0791a9, entries=8, sequenceid=60, filesize=5.5 K
2024-11-20T20:38:13,345 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/401aac2d964a43f5a34721f349bb9990 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/401aac2d964a43f5a34721f349bb9990
2024-11-20T20:38:13,352 INFO [M:0;d7d59561a64e:38137 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 401aac2d964a43f5a34721f349bb9990
2024-11-20T20:38:13,352 INFO [M:0;d7d59561a64e:38137 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/401aac2d964a43f5a34721f349bb9990, entries=6, sequenceid=60, filesize=6.1 K
2024-11-20T20:38:13,354 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1fd60de5610643e8a28603b9b4a87867 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1fd60de5610643e8a28603b9b4a87867
2024-11-20T20:38:13,360 INFO [M:0;d7d59561a64e:38137 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1fd60de5610643e8a28603b9b4a87867, entries=2, sequenceid=60, filesize=5.1 K
2024-11-20T20:38:13,361 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ce954dfcadc405cb5ce9447a6aaa450 as hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1ce954dfcadc405cb5ce9447a6aaa450
2024-11-20T20:38:13,368 INFO [M:0;d7d59561a64e:38137 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1ce954dfcadc405cb5ce9447a6aaa450, entries=1, sequenceid=60, filesize=4.9 K
2024-11-20T20:38:13,369 INFO [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 593ms, sequenceid=60, compaction requested=false
2024-11-20T20:38:13,371 INFO [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:13,371 DEBUG [M:0;d7d59561a64e:38137 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135092775Disabling compacts and flushes for region at 1732135092775Disabling writes for close at 1732135092776 (+1 ms)Obtaining lock to block concurrent updates at 1732135092776Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732135092776Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1732135092776Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732135092777 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732135092777Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732135092813 (+36 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732135092814 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732135092837 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732135092852 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732135092852Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732135092870 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732135092885 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732135092885Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732135092897 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732135092917 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732135092917Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65efa54b: reopening flushed file at 1732135093335 (+418 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70061c3a: reopening flushed file at 1732135093344 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3840a705: reopening flushed file at 1732135093352 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@414629d9: reopening flushed file at 1732135093360 (+8 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 593ms, sequenceid=60, compaction requested=false at 1732135093369 (+9 ms)Writing region close event to WAL at 1732135093371 (+2 ms)Closed at 1732135093371
2024-11-20T20:38:13,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:13,373 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:13,373 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:13,373 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:13,373 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:13,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45275 is added to blk_1073741889_1072 (size=1045)
2024-11-20T20:38:13,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36941 is added to blk_1073741889_1072 (size=1045)
2024-11-20T20:38:13,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:38:13,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:38:14,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:38:14,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:38:14,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:38:15,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:38:15,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:38:15,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:38:16,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:38:16,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:38:16,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:38:16,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-20T20:38:17,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:38:17,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:38:17,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:38:18,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:38:18,374 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-11-20T20:38:18,374 INFO [M:0;d7d59561a64e:38137 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-20T20:38:18,374 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-20T20:38:18,374 INFO [M:0;d7d59561a64e:38137 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38137
2024-11-20T20:38:18,374 INFO [M:0;d7d59561a64e:38137 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-20T20:38:18,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:38:18,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:38:18,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T20:38:18,547 INFO [M:0;d7d59561a64e:38137 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-20T20:38:18,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38137-0x10159f738630000, quorum=127.0.0.1:52190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T20:38:18,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d144912{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:18,584 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4982983{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:38:18,584 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:38:18,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44bf135d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:38:18,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67b1d1e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,STOPPED}
2024-11-20T20:38:18,587 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:38:18,587 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:38:18,587 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:38:18,587 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565890324-172.17.0.2-1732135040132 (Datanode Uuid 2f069166-49cc-4b43-8675-73d4df62dda0) service to localhost/127.0.0.1:44749
2024-11-20T20:38:18,587 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data3/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:18,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data4/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:18,588 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:38:18,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2305d82d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:18,590 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f334db9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:38:18,591 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:38:18,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4273ed86{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:38:18,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24a7a164{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,STOPPED}
2024-11-20T20:38:18,592 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:38:18,592 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:38:18,592 WARN [BP-1565890324-172.17.0.2-1732135040132 heartbeating to localhost/127.0.0.1:44749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565890324-172.17.0.2-1732135040132 (Datanode Uuid 2b5dcdae-15e4-4944-a72e-22c5ea8ad074) service to localhost/127.0.0.1:44749
2024-11-20T20:38:18,592 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:38:18,593 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data7/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:18,593 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/cluster_392edd7c-ab26-b9b6-d306-263886d427b4/data/data8/current/BP-1565890324-172.17.0.2-1732135040132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:18,594 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:38:18,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@279f484a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:38:18,601 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40334b2a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:38:18,601 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:38:18,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d3a3315{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:38:18,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52b2db2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir/,STOPPED}
2024-11-20T20:38:18,610 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-20T20:38:18,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-20T20:38:18,668 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 83)
Potentially hanging thread: HMaster-EventLoopGroup-5-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40197 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f0f5cbf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44749 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44749 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f0f5cbf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f0f5cbf4928.run(Unknown Source) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44749 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-17-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-16-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:40197
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-5-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44749
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Close-WAL-Writer-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f0f5cbf4928.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-17-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-20-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=429 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=305 (was 450), ProcessCount=11 (was 11), AvailableMemoryMB=1063 (was 2019)
2024-11-20T20:38:18,677 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=429, MaxFileDescriptor=1048576, SystemLoadAverage=305, ProcessCount=11, AvailableMemoryMB=1062
2024-11-20T20:38:18,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T20:38:18,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.log.dir so I do NOT create it in target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648
2024-11-20T20:38:18,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3776ac48-5324-2c10-c9b8-07d7f271f761/hadoop.tmp.dir so I do NOT create it in target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648
2024-11-20T20:38:18,678 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df, deleteOnExit=true
2024-11-20T20:38:18,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-20T20:38:18,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/test.cache.data in system properties and HBase conf
2024-11-20T20:38:18,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.tmp.dir in system properties and HBase conf
2024-11-20T20:38:18,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir in system properties and HBase conf
2024-11-20T20:38:18,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-20T20:38:18,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-20T20:38:18,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-20T20:38:18,679 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-20T20:38:18,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:38:18,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:38:18,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-20T20:38:18,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/nfs.dump.dir in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/java.io.tmpdir in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:38:18,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-20T20:38:18,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-20T20:38:18,697 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:38:18,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:19,072 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:19,077 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:38:19,078 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:38:19,078 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:38:19,079 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:38:19,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:19,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@115eaf6c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:38:19,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@209ebed6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:38:19,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@585ec7cf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/java.io.tmpdir/jetty-localhost-40269-hadoop-hdfs-3_4_1-tests_jar-_-any-13406251396236523319/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:38:19,200 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27bc6094{HTTP/1.1, (http/1.1)}{localhost:40269}
2024-11-20T20:38:19,200 INFO [Time-limited test {}] server.Server(415): Started @172907ms
2024-11-20T20:38:19,215 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:38:19,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:19,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:19,436 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:38:19,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:38:19,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:38:19,436 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:38:19,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9dc8204{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:38:19,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21e3a41b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:38:19,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:19,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:19,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@417b29aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/java.io.tmpdir/jetty-localhost-37049-hadoop-hdfs-3_4_1-tests_jar-_-any-2520074555904073323/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:19,539 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@350106ae{HTTP/1.1, (http/1.1)}{localhost:37049}
2024-11-20T20:38:19,539 INFO [Time-limited test {}] server.Server(415): Started @173246ms
2024-11-20T20:38:19,540 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:38:19,570 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:19,574 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:38:19,576 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:38:19,576 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:38:19,576 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:38:19,576 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53d4da47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:38:19,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bb6ca3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:38:19,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@598d9c74{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/java.io.tmpdir/jetty-localhost-33059-hadoop-hdfs-3_4_1-tests_jar-_-any-9382417172143478705/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:19,685 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25c074d6{HTTP/1.1, (http/1.1)}{localhost:33059}
2024-11-20T20:38:19,685 INFO [Time-limited test {}] server.Server(415): Started @173392ms
2024-11-20T20:38:19,687 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:38:19,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:20,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-20T20:38:20,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:20,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:20,737 WARN [Thread-1204 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data1/current/BP-1325861705-172.17.0.2-1732135098701/current, will proceed with Du for space computation calculation, 2024-11-20T20:38:20,737 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data2/current/BP-1325861705-172.17.0.2-1732135098701/current, will proceed with Du for space computation calculation, 2024-11-20T20:38:20,757 WARN [Thread-1168 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:38:20,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc38f5a244e3a7bb8 with lease ID 0x1b952dc97de12094: Processing first storage report for DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b from datanode DatanodeRegistration(127.0.0.1:37297, datanodeUuid=ce80fef2-4f68-4977-8d41-3f9d66556ca9, infoPort=39591, infoSecurePort=0, ipcPort=42693, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701) 2024-11-20T20:38:20,760 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc38f5a244e3a7bb8 with lease ID 0x1b952dc97de12094: from storage DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b node DatanodeRegistration(127.0.0.1:37297, datanodeUuid=ce80fef2-4f68-4977-8d41-3f9d66556ca9, infoPort=39591, infoSecurePort=0, ipcPort=42693, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:20,760 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc38f5a244e3a7bb8 with lease ID 0x1b952dc97de12094: Processing first storage report for DS-c42ca20d-e1ea-4296-98f5-4e2afec944e1 from datanode DatanodeRegistration(127.0.0.1:37297, datanodeUuid=ce80fef2-4f68-4977-8d41-3f9d66556ca9, infoPort=39591, infoSecurePort=0, ipcPort=42693, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701) 2024-11-20T20:38:20,760 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc38f5a244e3a7bb8 with lease ID 0x1b952dc97de12094: from storage DS-c42ca20d-e1ea-4296-98f5-4e2afec944e1 node DatanodeRegistration(127.0.0.1:37297, datanodeUuid=ce80fef2-4f68-4977-8d41-3f9d66556ca9, infoPort=39591, infoSecurePort=0, ipcPort=42693, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:20,867 WARN [Thread-1215 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data3/current/BP-1325861705-172.17.0.2-1732135098701/current, will proceed with Du for space computation calculation, 2024-11-20T20:38:20,867 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data4/current/BP-1325861705-172.17.0.2-1732135098701/current, will proceed with Du for space computation calculation, 2024-11-20T20:38:20,886 WARN [Thread-1191 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:38:20,888 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e870a0fbb8f4ed with lease ID 0x1b952dc97de12095: Processing first storage report for DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face from datanode DatanodeRegistration(127.0.0.1:43621, datanodeUuid=9d803552-fc36-44b4-af02-b5fdff5dea2b, infoPort=41139, infoSecurePort=0, ipcPort=44673, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701) 2024-11-20T20:38:20,888 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e870a0fbb8f4ed with lease ID 0x1b952dc97de12095: from storage DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face node DatanodeRegistration(127.0.0.1:43621, datanodeUuid=9d803552-fc36-44b4-af02-b5fdff5dea2b, infoPort=41139, infoSecurePort=0, ipcPort=44673, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:20,888 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e870a0fbb8f4ed with lease ID 0x1b952dc97de12095: Processing first storage report for DS-5fd445f0-f492-4a9e-8042-eb2b08e31f18 from datanode DatanodeRegistration(127.0.0.1:43621, datanodeUuid=9d803552-fc36-44b4-af02-b5fdff5dea2b, infoPort=41139, infoSecurePort=0, ipcPort=44673, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701) 2024-11-20T20:38:20,888 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e870a0fbb8f4ed with lease ID 0x1b952dc97de12095: from storage DS-5fd445f0-f492-4a9e-8042-eb2b08e31f18 node DatanodeRegistration(127.0.0.1:43621, datanodeUuid=9d803552-fc36-44b4-af02-b5fdff5dea2b, infoPort=41139, infoSecurePort=0, ipcPort=44673, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:20,928 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648 2024-11-20T20:38:20,931 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/zookeeper_0, clientPort=59087, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T20:38:20,932 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59087 2024-11-20T20:38:20,932 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:20,934 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:20,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741825_1001 (size=7) 2024-11-20T20:38:20,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741825_1001 (size=7) 2024-11-20T20:38:20,946 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3 with version=8 2024-11-20T20:38:20,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase-staging 2024-11-20T20:38:20,949 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d7d59561a64e:0 server-side Connection retries=45 2024-11-20T20:38:20,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:38:20,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T20:38:20,949 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T20:38:20,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:38:20,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T20:38:20,949 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T20:38:20,949 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T20:38:20,950 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38997 2024-11-20T20:38:20,952 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38997 connecting to ZooKeeper ensemble=127.0.0.1:59087 2024-11-20T20:38:20,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:21,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389970x0, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T20:38:21,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38997-0x10159f81dae0000 connected 2024-11-20T20:38:21,120 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:21,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:21,123 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:38:21,124 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3, hbase.cluster.distributed=false 2024-11-20T20:38:21,125 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T20:38:21,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38997 2024-11-20T20:38:21,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38997 2024-11-20T20:38:21,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38997 2024-11-20T20:38:21,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38997 2024-11-20T20:38:21,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38997 2024-11-20T20:38:21,148 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45 2024-11-20T20:38:21,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:38:21,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T20:38:21,148 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T20:38:21,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:38:21,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T20:38:21,148 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T20:38:21,149 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T20:38:21,152 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34897 2024-11-20T20:38:21,155 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34897 connecting to ZooKeeper ensemble=127.0.0.1:59087 2024-11-20T20:38:21,155 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:21,158 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:21,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348970x0, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T20:38:21,173 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:348970x0, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:38:21,173 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34897-0x10159f81dae0001 connected 2024-11-20T20:38:21,173 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T20:38:21,173 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with 
cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T20:38:21,174 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T20:38:21,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T20:38:21,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34897 2024-11-20T20:38:21,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34897 2024-11-20T20:38:21,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34897 2024-11-20T20:38:21,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34897 2024-11-20T20:38:21,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34897 2024-11-20T20:38:21,190 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d7d59561a64e:38997 2024-11-20T20:38:21,190 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d7d59561a64e,38997,1732135100948 2024-11-20T20:38:21,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:38:21,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:38:21,203 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d7d59561a64e,38997,1732135100948 2024-11-20T20:38:21,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T20:38:21,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,215 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T20:38:21,215 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for 
/hbase/backup-masters/d7d59561a64e,38997,1732135100948 from backup master directory 2024-11-20T20:38:21,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d7d59561a64e,38997,1732135100948 2024-11-20T20:38:21,225 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T20:38:21,225 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d7d59561a64e,38997,1732135100948 2024-11-20T20:38:21,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:38:21,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:38:21,229 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/hbase.id] with ID: 00af26f4-346c-469a-a070-87bef77651cf 2024-11-20T20:38:21,229 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/.tmp/hbase.id 2024-11-20T20:38:21,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741826_1002 (size=42) 2024-11-20T20:38:21,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741826_1002 (size=42) 2024-11-20T20:38:21,236 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/.tmp/hbase.id]:[hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/hbase.id] 2024-11-20T20:38:21,249 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:21,249 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T20:38:21,251 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-20T20:38:21,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:21,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:21,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:38:21,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:38:21,264 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T20:38:21,265 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-20T20:38:21,265 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:38:21,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:38:21,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:38:21,273 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store
2024-11-20T20:38:21,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:38:21,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:38:21,284 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:38:21,284 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T20:38:21,284 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:21,284 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:21,284 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T20:38:21,284 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:21,284 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:21,284 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135101284Disabling compacts and flushes for region at 1732135101284Disabling writes for close at 1732135101284Writing region close event to WAL at 1732135101284Closed at 1732135101284
2024-11-20T20:38:21,285 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/.initializing
2024-11-20T20:38:21,285 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948
2024-11-20T20:38:21,288 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C38997%2C1732135100948, suffix=, logDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948, archiveDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/oldWALs, maxLogs=10
2024-11-20T20:38:21,288 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C38997%2C1732135100948.1732135101288
2024-11-20T20:38:21,294 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288
2024-11-20T20:38:21,294 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39591:39591),(127.0.0.1/127.0.0.1:41139:41139)]
2024-11-20T20:38:21,295 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:38:21,295 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:38:21,295 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,295 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-20T20:38:21,298 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:38:21,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:38:21,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-20T20:38:21,300 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:38:21,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:38:21,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,301 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-20T20:38:21,302 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:38:21,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:38:21,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,303 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-20T20:38:21,303 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:38:21,304 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:38:21,304 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,305 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,305 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,307 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,307 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,307 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-20T20:38:21,309 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:21,314 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T20:38:21,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:21,315 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778959, jitterRate=-0.009502708911895752}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-20T20:38:21,315 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732135101295Initializing all the Stores at 1732135101296 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135101296Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135101296Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135101296Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135101296Cleaning up temporary data from old regions at 1732135101307 (+11 ms)Region opened successfully at 1732135101315 (+8 ms)
2024-11-20T20:38:21,316 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-20T20:38:21,320 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40a8c220, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0
2024-11-20T20:38:21,321 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-20T20:38:21,321 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-20T20:38:21,321 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-20T20:38:21,321 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-20T20:38:21,322 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T20:38:21,322 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T20:38:21,322 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T20:38:21,325 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T20:38:21,326 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T20:38:21,330 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T20:38:21,330 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T20:38:21,331 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T20:38:21,340 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T20:38:21,341 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T20:38:21,343 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T20:38:21,351 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T20:38:21,352 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T20:38:21,361 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T20:38:21,364 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T20:38:21,372 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T20:38:21,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-11-20T20:38:21,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:38:21,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,383 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d7d59561a64e,38997,1732135100948, sessionid=0x10159f81dae0000, setting cluster-up flag (Was=false) 2024-11-20T20:38:21,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,435 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T20:38:21,436 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,38997,1732135100948 2024-11-20T20:38:21,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:21,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:21,488 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T20:38:21,489 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,38997,1732135100948 2024-11-20T20:38:21,490 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T20:38:21,492 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T20:38:21,493 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T20:38:21,493 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
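On the balancer lines above: the slop=0.2 reported by BaseLoadBalancer bounds how far a server's region count may drift from the cluster mean before the simple balancer treats the cluster as unbalanced. A worked example of that arithmetic, on the assumption that slop acts as a symmetric band of average plus or minus slop times average (illustrative cluster numbers, not taken from this log):

    // Illustrative only: how a slop of 0.2 translates into a per-server band.
    final class SlopBand {
      public static void main(String[] args) {
        double slop = 0.2;             // "balancer.BaseLoadBalancer(416): slop=0.2"
        int regions = 90, servers = 4; // hypothetical cluster, not from this log
        double average = regions / (double) servers;          // 22.5 regions/server
        int floor = (int) Math.floor(average * (1 - slop));   // 18
        int ceil  = (int) Math.ceil(average * (1 + slop));    // 27
        System.out.printf("balanced while each server holds %d..%d regions%n",
            floor, ceil);
      }
    }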
2024-11-20T20:38:21,493 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d7d59561a64e,38997,1732135100948 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T20:38:21,495 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:38:21,495 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:38:21,495 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:38:21,495 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:38:21,495 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d7d59561a64e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T20:38:21,495 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,495 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:38:21,495 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,496 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732135131496 2024-11-20T20:38:21,497 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T20:38:21,497 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T20:38:21,497 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T20:38:21,497 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T20:38:21,497 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T20:38:21,497 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T20:38:21,497 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,498 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T20:38:21,498 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T20:38:21,498 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T20:38:21,498 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:38:21,498 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T20:38:21,498 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T20:38:21,498 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T20:38:21,499 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135101498,5,FailOnTimeoutGroup] 2024-11-20T20:38:21,499 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135101499,5,FailOnTimeoutGroup] 2024-11-20T20:38:21,499 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,499 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T20:38:21,499 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,499 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
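The ChoreService lines above register periodic background tasks: LogsCleaner and HFileCleaner every 600000 ms (10 min), ReplicationBarrierCleaner every 43200000 ms (12 h), SnapshotCleaner every 1800000 ms (30 min). A sketch of the same scheduling pattern using plain java.util.concurrent rather than HBase's ChoreService API:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Pattern sketch for "Chore ScheduledChore name=LogsCleaner, period=600000,
    // unit=MILLISECONDS is enabled.": a named task fired at a fixed period.
    final class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor(
            r -> new Thread(r, "LogsCleaner"));
        long periodMs = 600_000L; // 10 minutes, as logged
        pool.scheduleAtFixedRate(
            () -> System.out.println("chore fired: scan oldWALs for deletable files"),
            periodMs, periodMs, TimeUnit.MILLISECONDS);
      }
    }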
2024-11-20T20:38:21,499 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:21,500 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T20:38:21,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:38:21,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:38:21,512 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:21,513 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T20:38:21,513 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3 2024-11-20T20:38:21,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741832_1008 (size=32) 2024-11-20T20:38:21,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741832_1008 (size=32) 2024-11-20T20:38:21,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:38:21,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T20:38:21,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T20:38:21,523 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:21,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:21,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T20:38:21,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T20:38:21,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:21,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:21,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T20:38:21,527 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T20:38:21,527 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:21,528 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:21,528 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T20:38:21,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T20:38:21,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:21,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:21,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T20:38:21,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740 2024-11-20T20:38:21,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740 2024-11-20T20:38:21,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T20:38:21,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T20:38:21,532 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T20:38:21,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T20:38:21,536 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:38:21,537 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849380, jitterRate=0.08004362881183624}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T20:38:21,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732135101521Initializing all the Stores at 1732135101522 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135101522Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135101522Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135101522Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135101522Cleaning up temporary data from old regions at 1732135101532 (+10 ms)Region opened successfully at 1732135101538 (+6 ms) 2024-11-20T20:38:21,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T20:38:21,538 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T20:38:21,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T20:38:21,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T20:38:21,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T20:38:21,539 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T20:38:21,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135101538Disabling compacts and flushes for region at 
1732135101538Disabling writes for close at 1732135101538Writing region close event to WAL at 1732135101539 (+1 ms)Closed at 1732135101539 2024-11-20T20:38:21,540 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:38:21,540 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T20:38:21,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T20:38:21,542 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T20:38:21,544 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T20:38:21,578 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(746): ClusterId : 00af26f4-346c-469a-a070-87bef77651cf 2024-11-20T20:38:21,578 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T20:38:21,583 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T20:38:21,583 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T20:38:21,594 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T20:38:21,594 DEBUG [RS:0;d7d59561a64e:34897 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5eaa6a0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0 2024-11-20T20:38:21,608 DEBUG [RS:0;d7d59561a64e:34897 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d7d59561a64e:34897 2024-11-20T20:38:21,608 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T20:38:21,608 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T20:38:21,608 DEBUG [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(832): About to register with Master. 
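The "Installed shutdown hook thread: Shutdownhook:RS:0;d7d59561a64e:34897" line above uses the standard JVM shutdown-hook mechanism; a minimal sketch of the same pattern (our own wiring, not HBase's ShutdownHook class):

    // Pattern sketch: a named thread registered with the JVM so cleanup runs
    // at process exit (normal return or SIGTERM), as the log line describes.
    final class ShutdownHookSketch {
      public static void main(String[] args) {
        Thread hook = new Thread(
            () -> System.out.println("releasing region server resources"),
            "Shutdownhook:RS:0");
        Runtime.getRuntime().addShutdownHook(hook);
        System.out.println("hook installed; runs when the JVM shuts down");
      }
    }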
2024-11-20T20:38:21,609 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(2659): reportForDuty to master=d7d59561a64e,38997,1732135100948 with port=34897, startcode=1732135101148 2024-11-20T20:38:21,609 DEBUG [RS:0;d7d59561a64e:34897 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T20:38:21,611 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44927, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T20:38:21,612 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38997 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d7d59561a64e,34897,1732135101148 2024-11-20T20:38:21,612 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38997 {}] master.ServerManager(517): Registering regionserver=d7d59561a64e,34897,1732135101148 2024-11-20T20:38:21,614 DEBUG [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3 2024-11-20T20:38:21,614 DEBUG [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38981 2024-11-20T20:38:21,614 DEBUG [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T20:38:21,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:38:21,625 DEBUG [RS:0;d7d59561a64e:34897 {}] zookeeper.ZKUtil(111): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d7d59561a64e,34897,1732135101148 2024-11-20T20:38:21,625 WARN [RS:0;d7d59561a64e:34897 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T20:38:21,625 INFO [RS:0;d7d59561a64e:34897 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:38:21,625 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d7d59561a64e,34897,1732135101148] 2024-11-20T20:38:21,625 DEBUG [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148 2024-11-20T20:38:21,629 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T20:38:21,631 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T20:38:21,631 INFO [RS:0;d7d59561a64e:34897 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T20:38:21,631 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
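The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false) is consistent with the usual derivation if one assumes the default fractions of 0.4 of heap for the global limit and 0.95 of the limit for the low-water mark; on those assumptions the test JVM heap is about 2200 MB. A quick check of the arithmetic (both fractions and the heap size are assumptions here, not values printed by this log):

    // Back-of-envelope check of "globalMemStoreLimit=880 M,
    // globalMemStoreLimitLowMark=836 M" under assumed default fractions.
    final class MemstoreLimitCheck {
      public static void main(String[] args) {
        double heapMb = 2200.0;            // implied heap size (assumption)
        double limitMb = heapMb * 0.4;     // 880 MB global memstore limit
        double lowMarkMb = limitMb * 0.95; // 836 MB low-water mark
        System.out.printf("limit=%.0f MB, lowMark=%.0f MB%n", limitMb, lowMarkMb);
      }
    }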
2024-11-20T20:38:21,631 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T20:38:21,632 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T20:38:21,632 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,632 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,632 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,632 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,632 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T20:38:21,633 DEBUG [RS:0;d7d59561a64e:34897 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T20:38:21,634 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
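Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above corresponds to a small dedicated thread pool per event type. A sketch of that shape with java.util.concurrent (the wiring is illustrative, not HBase's executor.ExecutorService implementation):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Shape sketch for "Starting executor service name=RS_OPEN_REGION-...,
    // corePoolSize=1, maxPoolSize=1": one bounded pool per event type.
    final class EventPoolSketch {
      static ThreadPoolExecutor newPool(String name, int threads) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            threads, threads, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),          // unbounded handoff queue
            r -> new Thread(r, name + "-worker"));
        pool.allowCoreThreadTimeOut(true);        // idle workers may exit
        return pool;
      }
      public static void main(String[] args) {
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1);
        openRegion.execute(() -> System.out.println("handle open-region event"));
        openRegion.shutdown();
      }
    }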
2024-11-20T20:38:21,634 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,634 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,635 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,635 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,635 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,34897,1732135101148-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T20:38:21,652 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T20:38:21,652 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,34897,1732135101148-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,652 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,652 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.Replication(171): d7d59561a64e,34897,1732135101148 started 2024-11-20T20:38:21,668 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:21,669 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1482): Serving as d7d59561a64e,34897,1732135101148, RpcServer on d7d59561a64e/172.17.0.2:34897, sessionid=0x10159f81dae0001 2024-11-20T20:38:21,669 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T20:38:21,669 DEBUG [RS:0;d7d59561a64e:34897 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d7d59561a64e,34897,1732135101148 2024-11-20T20:38:21,669 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,34897,1732135101148' 2024-11-20T20:38:21,669 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T20:38:21,669 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T20:38:21,670 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T20:38:21,670 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T20:38:21,670 DEBUG [RS:0;d7d59561a64e:34897 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d7d59561a64e,34897,1732135101148 2024-11-20T20:38:21,670 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,34897,1732135101148' 2024-11-20T20:38:21,670 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T20:38:21,670 DEBUG 
[RS:0;d7d59561a64e:34897 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T20:38:21,670 DEBUG [RS:0;d7d59561a64e:34897 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T20:38:21,670 INFO [RS:0;d7d59561a64e:34897 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T20:38:21,670 INFO [RS:0;d7d59561a64e:34897 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T20:38:21,694 WARN [d7d59561a64e:38997 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T20:38:21,773 INFO [RS:0;d7d59561a64e:34897 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C34897%2C1732135101148, suffix=, logDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148, archiveDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/oldWALs, maxLogs=32 2024-11-20T20:38:21,774 INFO [RS:0;d7d59561a64e:34897 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C34897%2C1732135101148.1732135101774 2024-11-20T20:38:21,781 INFO [RS:0;d7d59561a64e:34897 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 2024-11-20T20:38:21,782 DEBUG [RS:0;d7d59561a64e:34897 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41139:41139),(127.0.0.1/127.0.0.1:39591:39591)] 2024-11-20T20:38:21,944 DEBUG [d7d59561a64e:38997 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T20:38:21,945 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d7d59561a64e,34897,1732135101148 2024-11-20T20:38:21,947 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,34897,1732135101148, state=OPENING 2024-11-20T20:38:21,951 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T20:38:21,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:21,962 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T20:38:21,962 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:38:21,962 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:38:21,962 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,34897,1732135101148}] 2024-11-20T20:38:21,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:22,017 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T20:38:22,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:38:22,117 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T20:38:22,118 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50001, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T20:38:22,122 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T20:38:22,122 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:38:22,124 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C34897%2C1732135101148.meta, suffix=.meta, logDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148, archiveDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/oldWALs, maxLogs=32 2024-11-20T20:38:22,125 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta 2024-11-20T20:38:22,130 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta 2024-11-20T20:38:22,131 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41139:41139),(127.0.0.1/127.0.0.1:39591:39591)] 2024-11-20T20:38:22,132 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => 
'', ENDKEY => ''} 2024-11-20T20:38:22,132 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T20:38:22,132 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T20:38:22,132 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T20:38:22,133 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T20:38:22,133 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:38:22,133 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T20:38:22,133 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T20:38:22,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T20:38:22,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T20:38:22,138 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:22,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:22,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T20:38:22,139 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T20:38:22,139 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:22,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:22,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T20:38:22,141 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T20:38:22,141 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:22,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:22,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T20:38:22,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T20:38:22,142 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:22,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:22,143 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T20:38:22,144 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740 2024-11-20T20:38:22,145 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740 2024-11-20T20:38:22,146 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T20:38:22,146 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T20:38:22,147 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T20:38:22,148 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T20:38:22,149 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765018, jitterRate=-0.027229800820350647}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T20:38:22,149 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T20:38:22,150 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732135102133Writing region info on filesystem at 1732135102133Initializing all the Stores at 1732135102134 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135102134Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135102136 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135102136Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135102136Cleaning up temporary data from old regions at 1732135102146 (+10 ms)Running coprocessor post-open hooks at 1732135102149 (+3 ms)Region opened successfully at 1732135102150 (+1 ms) 2024-11-20T20:38:22,151 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732135102116 2024-11-20T20:38:22,154 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T20:38:22,154 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T20:38:22,155 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=d7d59561a64e,34897,1732135101148 2024-11-20T20:38:22,156 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,34897,1732135101148, state=OPEN 2024-11-20T20:38:22,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T20:38:22,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T20:38:22,193 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d7d59561a64e,34897,1732135101148 2024-11-20T20:38:22,193 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:38:22,193 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:38:22,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T20:38:22,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,34897,1732135101148 in 231 msec 2024-11-20T20:38:22,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T20:38:22,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 656 msec 2024-11-20T20:38:22,202 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:38:22,202 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T20:38:22,204 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T20:38:22,204 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,34897,1732135101148, seqNum=-1] 2024-11-20T20:38:22,204 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T20:38:22,206 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53543, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T20:38:22,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 720 msec 2024-11-20T20:38:22,213 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732135102213, completionTime=-1 2024-11-20T20:38:22,213 INFO 
[master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T20:38:22,213 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T20:38:22,215 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T20:38:22,215 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732135162215 2024-11-20T20:38:22,215 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732135222215 2024-11-20T20:38:22,216 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T20:38:22,216 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38997,1732135100948-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:22,216 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38997,1732135100948-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:22,216 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38997,1732135100948-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:22,216 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d7d59561a64e:38997, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:22,216 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:22,217 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:22,218 DEBUG [master/d7d59561a64e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T20:38:22,221 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.996sec 2024-11-20T20:38:22,221 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T20:38:22,221 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T20:38:22,221 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T20:38:22,221 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T20:38:22,221 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T20:38:22,221 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38997,1732135100948-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T20:38:22,222 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38997,1732135100948-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T20:38:22,224 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T20:38:22,225 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T20:38:22,225 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,38997,1732135100948-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:22,278 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@641c7c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:38:22,279 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d7d59561a64e,38997,-1 for getting cluster id 2024-11-20T20:38:22,279 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T20:38:22,280 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00af26f4-346c-469a-a070-87bef77651cf' 2024-11-20T20:38:22,281 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T20:38:22,281 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00af26f4-346c-469a-a070-87bef77651cf" 2024-11-20T20:38:22,281 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fcfcb20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:38:22,281 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d7d59561a64e,38997,-1] 2024-11-20T20:38:22,281 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T20:38:22,282 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:38:22,283 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51792, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T20:38:22,284 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c17f84d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:38:22,284 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T20:38:22,285 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,34897,1732135101148, seqNum=-1] 2024-11-20T20:38:22,286 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T20:38:22,287 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54554, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T20:38:22,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d7d59561a64e,38997,1732135100948 2024-11-20T20:38:22,290 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:22,293 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T20:38:22,293 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-20T20:38:22,293 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-20T20:38:22,293 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T20:38:22,294 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is d7d59561a64e,38997,1732135100948 2024-11-20T20:38:22,294 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@35eb9842 2024-11-20T20:38:22,294 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T20:38:22,296 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51808, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T20:38:22,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38997 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T20:38:22,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38997 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-20T20:38:22,297 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38997 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T20:38:22,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38997 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T20:38:22,300 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T20:38:22,300 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:22,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38997 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-20T20:38:22,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38997 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T20:38:22,302 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T20:38:22,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741835_1011 (size=395) 2024-11-20T20:38:22,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741835_1011 (size=395) 2024-11-20T20:38:22,310 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 37bbdad65fb0e940db71ac3cc8bb993e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3 2024-11-20T20:38:22,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:22,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43621 is added to blk_1073741836_1012 (size=78) 2024-11-20T20:38:22,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37297 is added to blk_1073741836_1012 (size=78) 2024-11-20T20:38:22,318 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:38:22,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 37bbdad65fb0e940db71ac3cc8bb993e, disabling compactions & flushes 2024-11-20T20:38:22,319 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:22,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 
2024-11-20T20:38:22,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. after waiting 0 ms 2024-11-20T20:38:22,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:22,319 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:22,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 37bbdad65fb0e940db71ac3cc8bb993e: Waiting for close lock at 1732135102318Disabling compacts and flushes for region at 1732135102318Disabling writes for close at 1732135102319 (+1 ms)Writing region close event to WAL at 1732135102319Closed at 1732135102319 2024-11-20T20:38:22,321 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T20:38:22,321 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732135102321"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732135102321"}]},"ts":"1732135102321"} 2024-11-20T20:38:22,324 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-20T20:38:22,325 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T20:38:22,325 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732135102325"}]},"ts":"1732135102325"} 2024-11-20T20:38:22,328 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-20T20:38:22,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=37bbdad65fb0e940db71ac3cc8bb993e, ASSIGN}] 2024-11-20T20:38:22,329 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=37bbdad65fb0e940db71ac3cc8bb993e, ASSIGN 2024-11-20T20:38:22,330 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=37bbdad65fb0e940db71ac3cc8bb993e, ASSIGN; state=OFFLINE, location=d7d59561a64e,34897,1732135101148; forceNewPlan=false, retain=false 2024-11-20T20:38:22,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null 2024-11-20T20:38:22,481 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=37bbdad65fb0e940db71ac3cc8bb993e, regionState=OPENING, regionLocation=d7d59561a64e,34897,1732135101148 2024-11-20T20:38:22,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null 2024-11-20T20:38:22,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=37bbdad65fb0e940db71ac3cc8bb993e, ASSIGN because future has completed 2024-11-20T20:38:22,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 37bbdad65fb0e940db71ac3cc8bb993e, server=d7d59561a64e,34897,1732135101148}] 2024-11-20T20:38:22,643 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:22,643 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 37bbdad65fb0e940db71ac3cc8bb993e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T20:38:22,644 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,644 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:38:22,644 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,644 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,646 INFO [StoreOpener-37bbdad65fb0e940db71ac3cc8bb993e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,647 INFO [StoreOpener-37bbdad65fb0e940db71ac3cc8bb993e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 37bbdad65fb0e940db71ac3cc8bb993e columnFamilyName info 2024-11-20T20:38:22,647 DEBUG [StoreOpener-37bbdad65fb0e940db71ac3cc8bb993e-1 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:22,648 INFO [StoreOpener-37bbdad65fb0e940db71ac3cc8bb993e-1 {}] regionserver.HStore(327): Store=37bbdad65fb0e940db71ac3cc8bb993e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:38:22,648 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,649 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,649 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,649 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,649 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,651 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,656 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:38:22,657 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 37bbdad65fb0e940db71ac3cc8bb993e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788503, jitterRate=0.002633914351463318}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T20:38:22,657 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:22,658 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 37bbdad65fb0e940db71ac3cc8bb993e: Running coprocessor pre-open hook at 1732135102644Writing region info on filesystem at 1732135102644Initializing all the Stores at 1732135102645 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135102645Cleaning up temporary data from old regions at 1732135102649 (+4 ms)Running coprocessor post-open hooks at 1732135102657 (+8 ms)Region opened successfully at 1732135102658 (+1 ms) 2024-11-20T20:38:22,659 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e., pid=6, masterSystemTime=1732135102638 2024-11-20T20:38:22,662 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:22,662 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:22,663 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=37bbdad65fb0e940db71ac3cc8bb993e, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,34897,1732135101148 2024-11-20T20:38:22,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 37bbdad65fb0e940db71ac3cc8bb993e, server=d7d59561a64e,34897,1732135101148 because future has completed 2024-11-20T20:38:22,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T20:38:22,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 37bbdad65fb0e940db71ac3cc8bb993e, server=d7d59561a64e,34897,1732135101148 in 183 msec 2024-11-20T20:38:22,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T20:38:22,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=37bbdad65fb0e940db71ac3cc8bb993e, ASSIGN in 343 msec 2024-11-20T20:38:22,675 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T20:38:22,675 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732135102675"}]},"ts":"1732135102675"} 2024-11-20T20:38:22,677 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-20T20:38:22,678 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T20:38:22,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 381 msec 2024-11-20T20:38:22,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null 2024-11-20T20:38:23,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null 2024-11-20T20:38:23,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null 2024-11-20T20:38:23,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null 2024-11-20T20:38:23,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null 2024-11-20T20:38:24,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-20T20:38:24,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:24,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:24,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:25,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:25,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:25,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:25,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:26,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:26,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:26,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
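[Editor's note: why every retry surfaces as "InvocationTargetException: null" with the real error hidden in the cause. The trace shows RecoverLeaseFSUtils.isFileClosed calling Method.invoke, i.e. it probes DistributedFileSystem.isFileClosed reflectively (presumably because the method is not on the generic FileSystem API). Method.invoke wraps whatever the target throws, here the IOException "Filesystem closed" from an already-shut-down DFSClient, in an InvocationTargetException whose own message is null. A minimal, self-contained sketch of that wrapping behavior; this is plain Java for illustration, not HBase code, and the Probe/isClosed names are made up:]

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrapDemo {
    // Stand-in for DistributedFileSystem.isFileClosed: a target method
    // that throws a checked exception, like the closed DFSClient does.
    public static class Probe {
        public boolean isClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = Probe.class.getMethod("isClosed", String.class);
        try {
            m.invoke(new Probe(), "/some/wal");
        } catch (InvocationTargetException e) {
            // The wrapper's message is null; the real failure is the cause.
            // This is exactly the shape logged above.
            System.out.println("wrapper: " + e);            // ...InvocationTargetException
            System.out.println("cause:   " + e.getCause()); // java.io.IOException: Filesystem closed
        }
    }
}
```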
2024-11-20T20:38:26,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-20T20:38:26,486 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-20T20:38:26,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-20T20:38:26,487 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-20T20:38:26,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:38:26,488 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-20T20:38:26,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-20T20:38:26,488 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
[The lease-recovery WARN and stack trace above repeat for the same four WAL files at 20:38:26,986 (master WAL); 20:38:27,319 (...1732135043207); 20:38:27,461 (...1732135044133); and 20:38:27,484 (meta WAL).]
2024-11-20T20:38:27,636 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T20:38:27,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[This identical FsDatasetImpl warning appears 10 times in total between 20:38:27,658 and 20:38:27,667.]
2024-11-20T20:38:27,673 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-20T20:38:27,673 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
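[Editor's note: the FsDatasetImpl message above is a JDK "helpful NullPointerException" (JEP 358): the metrics collector called .values() on a Map field named executors that was already null, consistent with a DataNode that is mid-shutdown while metrics are still being polled. A tiny standalone reproduction of how that exact message text arises; the class and field below are stand-ins, not Hadoop code:]

```java
import java.util.Map;

public class HelpfulNpeDemo {
    // Stand-in for the already-torn-down executor map in the warning above.
    private Map<String, Object> executors; // never assigned -> null

    void collectMetrics() {
        // Throws NullPointerException with the message:
        // Cannot invoke "java.util.Map.values()" because "this.executors" is null
        executors.values();
    }

    public static void main(String[] args) {
        try {
            new HelpfulNpeDemo().collectMetrics();
        } catch (NullPointerException e) {
            System.out.println(e.getMessage());
        }
    }
}
```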
[The lease-recovery WARN and stack trace continue to repeat, still cycling through the same four WAL files: 20:38:27,986 (master WAL); 20:38:28,320 (...1732135043207); 20:38:28,462 (...1732135044133); 20:38:28,485 (meta WAL); 20:38:28,987 (master WAL); 20:38:29,320 (...1732135043207); 20:38:29,463 (...1732135044133); 20:38:29,486 (meta WAL); 20:38:29,988 (master WAL); and 20:38:30,321 (...1732135043207), where this log excerpt ends mid-trace.]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:30,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:30,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:30,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:31,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:31,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:31,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:31,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:32,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
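The repeating warning above is HBase's WAL close path retrying lease recovery: as the Method.invoke frames show, RecoverLeaseFSUtils.isFileClosed calls DistributedFileSystem#isFileClosed through reflection, and because the test's DFSClient has already been shut down, every probe throws InvocationTargetException wrapping IOException("Filesystem closed"), which gets logged and retried about once per second per file. Below is a minimal, dependency-free Java sketch of that reflective-probe pattern; FakeDfs and probe are illustrative stand-ins, not HBase's actual code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Sketch of the probe pattern implied by the frames above. FakeDfs is a
// stand-in for DistributedFileSystem: once the client is closed, it fails
// the same way DFSClient.checkOpen does in the traces.
public class IsFileClosedProbe {
    public static class FakeDfs {
        volatile boolean closed;
        public boolean isFileClosed(String path) throws IOException {
            if (closed) {
                throw new IOException("Filesystem closed");
            }
            return false; // file still open, lease not recovered yet
        }
    }

    static Boolean probe(Object fs, Method isFileClosed, String path) {
        try {
            return (Boolean) isFileClosed.invoke(fs, path);
        } catch (InvocationTargetException e) {
            // The "Failed invocation for <path>" case: the informative cause
            // ("Filesystem closed") is the wrapped target exception.
            System.err.println("Failed invocation for " + path + ": " + e.getTargetException());
            return null; // caller logs and retries on the next tick
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) throws Exception {
        FakeDfs fs = new FakeDfs();
        Method m = FakeDfs.class.getMethod("isFileClosed", String.class);
        System.out.println(probe(fs, m, "/wal/file")); // false: probe succeeds
        fs.closed = true;                              // mini-cluster torn down
        System.out.println(probe(fs, m, "/wal/file")); // null: logged failure, would retry
    }
}

Unwrapping getTargetException() is what turns the opaque "InvocationTargetException: null" headline into the meaningful "Filesystem closed" cause seen in each trace.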
2024-11-20T20:38:32,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38997 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T20:38:32,407 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-20T20:38:32,407 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart,, for max=2147483647 with caching=100
2024-11-20T20:38:32,411 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-20T20:38:32,411 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e.
2024-11-20T20:38:32,417 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e., hostname=d7d59561a64e,34897,1732135101148, seqNum=2]
[The same "Failed invocation" WARN and stack trace recurred 8 more times between 2024-11-20T20:38:32,466 and 2024-11-20T20:38:34,324 for the same four WAL files.]
2024-11-20T20:38:34,420 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774
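What follows is an HDFS write-pipeline failure during TestLogRolling-testLogRollOnPipelineRestart (the test name suggests the pipeline restart is deliberate): each ResponseProcessor stops receiving valid acks (EOFException, or a "Bad response ERROR" naming the downstream node), the DataStreamer then declares that datanode bad and rebuilds the pipeline from the surviving replica, while the datanodes log the mirror-image DataXceiver/PacketResponder errors as their sockets close. Below is a toy, self-contained Java model of that exclusion decision; it illustrates the idea only and is not the real DataStreamer logic.

import java.util.ArrayList;
import java.util.List;

// Toy model of the recovery decision in the "Error Recovery for <block> in
// pipeline [...]: datanode N(...) is bad." lines below: the first node whose
// ack is not SUCCESS is excluded, and the write continues on the remaining
// replicas. Illustrative only; not DataStreamer's actual implementation.
public class PipelineAckSketch {
    enum Status { SUCCESS, ERROR }

    static List<String> dropBadNode(List<String> pipeline, Status[] acks) {
        for (int i = 0; i < acks.length; i++) {
            if (acks[i] != Status.SUCCESS) {
                System.out.println("datanode " + i + "(" + pipeline.get(i) + ") is bad.");
                List<String> rebuilt = new ArrayList<>(pipeline);
                rebuilt.remove(i); // rebuild the pipeline without the bad node
                return rebuilt;
            }
        }
        return pipeline; // all acks good, nothing to recover
    }

    public static void main(String[] args) {
        // Mirrors blk_1073741830_1006 below: pipeline [37297, 43621], ERROR from 43621.
        List<String> pipeline = List.of("127.0.0.1:37297", "127.0.0.1:43621");
        Status[] acks = { Status.SUCCESS, Status.ERROR };
        System.out.println("rebuilt pipeline: " + dropBadNode(pipeline, acks));
    }
}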
2024-11-20T20:38:34,421 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:34,421 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:34,422 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43621,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:34,422 WARN [DataStreamer for file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 block BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43621,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK], DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43621,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]) is bad.
2024-11-20T20:38:34,422 WARN [DataStreamer for file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288 block BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK], DatanodeInfoWithStorage[127.0.0.1:43621,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43621,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]) is bad.
2024-11-20T20:38:34,422 WARN [PacketResponder: BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43621] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:34,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:42296 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42296 dst: /127.0.0.1:43621
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:34,423 WARN [DataStreamer for file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta block BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43621,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK], DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43621,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]) is bad.
2024-11-20T20:38:34,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-307592138_22 at /127.0.0.1:33640 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33640 dst: /127.0.0.1:37297
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:34,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:33666 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33666 dst: /127.0.0.1:37297
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:34,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-307592138_22 at /127.0.0.1:42268 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42268 dst: /127.0.0.1:43621
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:34,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:45516 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45516 dst: /127.0.0.1:43621
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:34,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:37136 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37136 dst: /127.0.0.1:37297
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:38:34,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@598d9c74{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:38:34,457 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25c074d6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:38:34,457 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:38:34,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bb6ca3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:38:34,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53d4da47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,STOPPED} 2024-11-20T20:38:34,460 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:38:34,460 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1325861705-172.17.0.2-1732135098701 (Datanode Uuid 9d803552-fc36-44b4-af02-b5fdff5dea2b) service to localhost/127.0.0.1:38981 2024-11-20T20:38:34,460 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T20:38:34,460 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T20:38:34,461 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data3/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:38:34,461 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data4/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:38:34,461 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:38:34,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:34,470 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T20:38:34,474 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T20:38:34,475 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T20:38:34,475 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T20:38:34,475 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T20:38:34,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6143724{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,AVAILABLE} 2024-11-20T20:38:34,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61be23b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T20:38:34,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:34,584 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4dfcaa19{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/java.io.tmpdir/jetty-localhost-43401-hadoop-hdfs-3_4_1-tests_jar-_-any-2733628167481942834/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:38:34,584 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e7340de{HTTP/1.1, (http/1.1)}{localhost:43401} 2024-11-20T20:38:34,584 INFO [Time-limited test {}] server.Server(415): Started @188291ms 2024-11-20T20:38:34,586 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T20:38:34,603 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:34,603 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:34,603 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T20:38:34,603 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:55614 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55614 dst: /127.0.0.1:37297 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:38:34,603 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-307592138_22 at /127.0.0.1:55646 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55646 dst: /127.0.0.1:37297 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:38:34,603 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:55630 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55630 dst: /127.0.0.1:37297 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:38:34,615 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@417b29aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:38:34,616 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@350106ae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:38:34,616 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:38:34,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21e3a41b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:38:34,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9dc8204{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,STOPPED} 2024-11-20T20:38:34,617 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:38:34,617 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1325861705-172.17.0.2-1732135098701 (Datanode Uuid ce80fef2-4f68-4977-8d41-3f9d66556ca9) service to localhost/127.0.0.1:38981 2024-11-20T20:38:34,617 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T20:38:34,617 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T20:38:34,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data1/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:38:34,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data2/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:38:34,618 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:38:34,627 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T20:38:34,632 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T20:38:34,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T20:38:34,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T20:38:34,633 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T20:38:34,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1173a7e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,AVAILABLE} 2024-11-20T20:38:34,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36f6f2a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T20:38:34,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6023d9e0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/java.io.tmpdir/jetty-localhost-39759-hadoop-hdfs-3_4_1-tests_jar-_-any-10976845764346918615/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:38:34,748 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6432dc3{HTTP/1.1, (http/1.1)}{localhost:39759} 2024-11-20T20:38:34,748 INFO [Time-limited test {}] server.Server(415): Started @188455ms 2024-11-20T20:38:34,750 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-20T20:38:34,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:35,155 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:38:35,157 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x778961ce43978bfc with lease ID 0x1b952dc97de12096: from storage DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face node DatanodeRegistration(127.0.0.1:35107, datanodeUuid=9d803552-fc36-44b4-af02-b5fdff5dea2b, infoPort=40813, infoSecurePort=0, ipcPort=39603, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:35,157 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x778961ce43978bfc with lease ID 0x1b952dc97de12096: from storage DS-5fd445f0-f492-4a9e-8042-eb2b08e31f18 node DatanodeRegistration(127.0.0.1:35107, datanodeUuid=9d803552-fc36-44b4-af02-b5fdff5dea2b, infoPort=40813, infoSecurePort=0, ipcPort=39603, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:35,304 WARN [Thread-1360 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T20:38:35,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55f34f1e9470aa73 with lease ID 0x1b952dc97de12097: from storage DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b node DatanodeRegistration(127.0.0.1:33311, datanodeUuid=ce80fef2-4f68-4977-8d41-3f9d66556ca9, infoPort=38355, infoSecurePort=0, ipcPort=39933, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:35,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55f34f1e9470aa73 with lease ID 0x1b952dc97de12097: from storage DS-c42ca20d-e1ea-4296-98f5-4e2afec944e1 node DatanodeRegistration(127.0.0.1:33311, datanodeUuid=ce80fef2-4f68-4977-8d41-3f9d66556ca9, infoPort=38355, infoSecurePort=0, ipcPort=39933, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:35,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:35,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:35,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:35,781 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-20T20:38:35,784 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-20T20:38:35,785 ERROR [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3-prefix:d7d59561a64e,34897,1732135101148 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T20:38:35,785 WARN [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3-prefix:d7d59561a64e,34897,1732135101148 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:35,785 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C34897%2C1732135101148:(num 1732135101774) roll requested 2024-11-20T20:38:35,786 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C34897%2C1732135101148.1732135115785 2024-11-20T20:38:35,791 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 newFile=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 2024-11-20T20:38:35,791 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:35,791 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:35,791 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:35,791 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:35,792 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:35,792 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 2024-11-20T20:38:35,792 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T20:38:35,792 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:35,792 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 2024-11-20T20:38:35,793 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40813:40813),(127.0.0.1/127.0.0.1:38355:38355)] 2024-11-20T20:38:35,793 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 is not closed yet, will try archiving it next time 2024-11-20T20:38:35,793 WARN [IPC Server handler 3 on default port 38981 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-20T20:38:35,793 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 after 1ms 2024-11-20T20:38:35,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:36,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33311 is added to blk_1073741833_1017 (size=1632) 2024-11-20T20:38:36,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:36,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:36,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
2024-11-20T20:38:36,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:37,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:37,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:37,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:37,796 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003
2024-11-20T20:38:37,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:38,158 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-11-20T20:38:38,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:38,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:38,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:38,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:39,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:39,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:39,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:39,794 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 after 4002ms
2024-11-20T20:38:39,799 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016
java.io.IOException: Bad response ERROR for BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:33311,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:39,799 WARN [DataStreamer for file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 block BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35107,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK], DatanodeInfoWithStorage[127.0.0.1:33311,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33311,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]) is bad.
2024-11-20T20:38:39,799 WARN [PacketResponder: BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33311] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
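The "Recovered lease, attempt=1 ... after 4002ms" line above is the happy path of the same routine that keeps failing against the closed filesystem: recoverLease() is triggered, then the file is polled until the NameNode reports it closed. A rough sketch of that loop against a DistributedFileSystem handle; the 1-second pause and the name recoverWalLease are illustrative, not the exact RecoverLeaseFSUtils code:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoverySketch {
      // Trigger lease recovery and poll until the NameNode closes the file.
      static void recoverWalLease(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        long start = System.currentTimeMillis();
        int attempt = 0;
        boolean recovered = dfs.recoverLease(wal); // true once the lease is already free
        while (!recovered) {
          Thread.sleep(1000L);                     // illustrative pause between probes
          attempt++;
          recovered = dfs.isFileClosed(wal)        // the probe that throws "Filesystem
              || dfs.recoverLease(wal);            // closed" once the client is shut down
        }
        System.out.printf("Recovered lease, attempt=%d on file=%s after %dms%n",
            attempt, wal, System.currentTimeMillis() - start);
      }
    }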
2024-11-20T20:38:39,800 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:40288 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35107:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40288 dst: /127.0.0.1:35107
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:39,800 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:46804 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33311:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46804 dst: /127.0.0.1:33311
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:39,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6023d9e0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:39,871 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6432dc3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:38:39,871 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:38:39,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36f6f2a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:38:39,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1173a7e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,STOPPED}
2024-11-20T20:38:39,873 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:38:39,873 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
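The recurring root cause behind the Close-WAL-Writer retries is generic DFS client behavior: once a FileSystem handle is closed (here, when the first mini-cluster at localhost:44749 was torn down while its close-WAL workers were still running), every subsequent call fails fast in DFSClient.checkOpen. A minimal, self-contained illustration under that assumption; the URI is taken from the log, and the snippet is a demonstration, not test code from this run:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class FilesystemClosedDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(
            URI.create("hdfs://localhost:44749"), new Configuration());
        fs.close();                 // tears down the underlying DFSClient
        fs.exists(new Path("/"));   // throws java.io.IOException: Filesystem closed
      }
    }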
2024-11-20T20:38:39,873 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1325861705-172.17.0.2-1732135098701 (Datanode Uuid ce80fef2-4f68-4977-8d41-3f9d66556ca9) service to localhost/127.0.0.1:38981
2024-11-20T20:38:39,873 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:38:39,874 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data1/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:39,874 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data2/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:39,874 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:38:39,887 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:39,893 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:38:39,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:38:39,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:38:39,896 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:38:39,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48f4fa87{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:38:39,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@686dd8bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:38:39,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:40,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10e0b03{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/java.io.tmpdir/jetty-localhost-37249-hadoop-hdfs-3_4_1-tests_jar-_-any-638280141918702883/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:40,015 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c894b0d{HTTP/1.1, (http/1.1)}{localhost:37249}
2024-11-20T20:38:40,015 INFO [Time-limited test {}] server.Server(415): Started @193721ms
2024-11-20T20:38:40,017 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:38:40,077 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:40,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_710142480_22 at /127.0.0.1:40298 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35107:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40298 dst: /127.0.0.1:35107
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:40,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4dfcaa19{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:40,083 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e7340de{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:38:40,083 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:38:40,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61be23b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:38:40,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6143724{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,STOPPED}
2024-11-20T20:38:40,084 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:38:40,084 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1325861705-172.17.0.2-1732135098701 (Datanode Uuid 9d803552-fc36-44b4-af02-b5fdff5dea2b) service to localhost/127.0.0.1:38981
2024-11-20T20:38:40,085 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data3/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:40,085 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data4/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:40,086 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:38:40,086 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:38:40,086 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:38:40,101 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:40,108 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:38:40,113 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:38:40,113 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:38:40,113 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:38:40,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a0c1f66{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:38:40,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f2d69a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:38:40,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@478c28d0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/java.io.tmpdir/jetty-localhost-45457-hadoop-hdfs-3_4_1-tests_jar-_-any-13339545722869460991/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:40,257 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@8cbc01a{HTTP/1.1, (http/1.1)}{localhost:45457}
2024-11-20T20:38:40,257 INFO [Time-limited test {}] server.Server(415): Started @193964ms
2024-11-20T20:38:40,259 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:38:40,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
2024-11-20T20:38:40,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 20:38:36,325 entry above; root cause: java.io.IOException: Filesystem closed, ... 11 more]
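The stop/start pairs above (Jetty contexts going STOPPED, then a fresh ServerConnector and "Started @193964ms") are the log signature of the test bouncing mini-cluster datanodes to exercise WAL pipeline recovery. A sketch of that pattern using Hadoop's MiniDFSCluster test API, under the assumption that the test holds a cluster handle; bounceDataNode is an illustrative helper name, not the test's actual method:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class DataNodeBounceSketch {
      // Stop a datanode (producing the "Stopped ..." Jetty lines), then restart it
      // (producing "Started ServerConnector..." / "Started @...ms") and wait until
      // its block report is processed, as in the BlockManager lines that follow.
      static void bounceDataNode(MiniDFSCluster cluster, int index) throws Exception {
        MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(index);
        cluster.restartDataNode(dn, true); // true: reuse the same ports
        cluster.waitActive();
      }
    }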
11 more 2024-11-20T20:38:40,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:40,538 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T20:38:40,541 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64d38ac49cee28b1 with lease ID 0x1b952dc97de12098: from storage DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b node DatanodeRegistration(127.0.0.1:46429, datanodeUuid=ce80fef2-4f68-4977-8d41-3f9d66556ca9, infoPort=35747, infoSecurePort=0, ipcPort=38971, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:40,541 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64d38ac49cee28b1 with lease ID 0x1b952dc97de12098: from storage DS-c42ca20d-e1ea-4296-98f5-4e2afec944e1 node DatanodeRegistration(127.0.0.1:46429, datanodeUuid=ce80fef2-4f68-4977-8d41-3f9d66556ca9, infoPort=35747, infoSecurePort=0, ipcPort=38971, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:40,782 WARN [Thread-1434 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T20:38:40,785 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b9a75ccee8ee9b2 with lease ID 0x1b952dc97de12099: from storage DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face node DatanodeRegistration(127.0.0.1:33795, datanodeUuid=9d803552-fc36-44b4-af02-b5fdff5dea2b, infoPort=37553, infoSecurePort=0, ipcPort=37279, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:40,786 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b9a75ccee8ee9b2 with lease ID 0x1b952dc97de12099: from storage DS-5fd445f0-f492-4a9e-8042-eb2b08e31f18 node DatanodeRegistration(127.0.0.1:33795, datanodeUuid=9d803552-fc36-44b4-af02-b5fdff5dea2b, infoPort=37553, infoSecurePort=0, ipcPort=37279, storageInfo=lv=-57;cid=testClusterID;nsid=2028321080;c=1732135098701), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T20:38:40,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:41,313 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-20T20:38:41,316 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-20T20:38:41,317 ERROR [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3-prefix:d7d59561a64e,34897,1732135101148 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35107,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:41,317 WARN [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3-prefix:d7d59561a64e,34897,1732135101148 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35107,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T20:38:41,318 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C34897%2C1732135101148:(num 1732135115785) roll requested
2024-11-20T20:38:41,318 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C34897%2C1732135101148.1732135121318
2024-11-20T20:38:41,326 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 newFile=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318
2024-11-20T20:38:41,326 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:41,326 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:41,326 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:41,326 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:41,326 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:41,327 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318
2024-11-20T20:38:41,327 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35107,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:41,327 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35107,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:41,327 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785
2024-11-20T20:38:41,327 WARN [IPC Server handler 0 on default port 38981 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018
2024-11-20T20:38:41,328 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 after 1ms
2024-11-20T20:38:41,328 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37553:37553),(127.0.0.1/127.0.0.1:35747:35747)]
2024-11-20T20:38:41,328 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 is not closed yet, will try archiving it next time
2024-11-20T20:38:41,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:41,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:41,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:41,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:42,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:42,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:42,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:42,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:43,330 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C34897%2C1732135101148.1732135123330
2024-11-20T20:38:43,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:43,340 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318 newFile=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330
2024-11-20T20:38:43,341 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:43,341 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:43,341 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:43,341 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:43,341 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:43,341 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330
2024-11-20T20:38:43,342 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35747:35747),(127.0.0.1/127.0.0.1:37553:37553)]
2024-11-20T20:38:43,342 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 is not closed yet, will try archiving it next time
2024-11-20T20:38:43,342 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318 is not closed yet, will try archiving it next time
2024-11-20T20:38:43,342 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774
2024-11-20T20:38:43,342 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774
2024-11-20T20:38:43,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741838_1019 (size=1264)
2024-11-20T20:38:43,343 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774 after 1ms
2024-11-20T20:38:43,343 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774
2024-11-20T20:38:43,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741838_1019 (size=1264)
2024-11-20T20:38:43,345 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 is not closed yet, will try archiving it next time
2024-11-20T20:38:43,352 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732135102658/Put/vlen=218/seqid=0]
2024-11-20T20:38:43,353 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732135112418/Put/vlen=1045/seqid=0]
2024-11-20T20:38:43,353 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135101774
2024-11-20T20:38:43,353 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785
2024-11-20T20:38:43,353 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785
2024-11-20T20:38:43,353 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 after 0ms
2024-11-20T20:38:43,353 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785
2024-11-20T20:38:43,357 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732135115785/Put/vlen=1045/seqid=0]
2024-11-20T20:38:43,357 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732135117797/Put/vlen=1045/seqid=0]
2024-11-20T20:38:43,357 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785
2024-11-20T20:38:43,357 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318
2024-11-20T20:38:43,357 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318
2024-11-20T20:38:43,358 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318 after 1ms
2024-11-20T20:38:43,358 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135121318
2024-11-20T20:38:43,362 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732135121317/Put/vlen=1045/seqid=0]
2024-11-20T20:38:43,362 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330
2024-11-20T20:38:43,362 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330
2024-11-20T20:38:43,362 WARN [IPC Server handler 2 on default port 38981 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021
2024-11-20T20:38:43,362 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 after 0ms
2024-11-20T20:38:43,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:43,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:43,797 WARN [ResponseProcessor for block BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:43,797 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-307592138_22 at /127.0.0.1:55312 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55312 dst: /127.0.0.1:46429
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:46429 remote=/127.0.0.1:55312]. Total timeout mills is 60000, 59542 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:43,797 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-307592138_22 at /127.0.0.1:33492 [Receiving block BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33492 dst: /127.0.0.1:33795
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:38:43,798 WARN [DataStreamer for file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 block BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46429,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK], DatanodeInfoWithStorage[127.0.0.1:33795,DS-87ec2e13-a44e-4ebe-9cd8-96fdc5b0face,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46429,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]) is bad.
2024-11-20T20:38:43,798 WARN [DataStreamer for file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 block BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T20:38:43,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741839_1022 (size=85)
2024-11-20T20:38:43,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:44,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:44,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 after 68065ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T20:38:44,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:44,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:45,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:45,328 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135115785 after 4001ms 2024-11-20T20:38:45,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:45,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:45,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:46,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:46,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:46,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:46,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:46,541 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T20:38:47,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:47,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:47,363 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 after 4001ms 2024-11-20T20:38:47,364 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 2024-11-20T20:38:47,370 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 2024-11-20T20:38:47,370 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 37bbdad65fb0e940db71ac3cc8bb993e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-20T20:38:47,371 ERROR [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3-prefix:d7d59561a64e,34897,1732135101148 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:47,371 WARN [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3-prefix:d7d59561a64e,34897,1732135101148 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:47,372 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C34897%2C1732135101148:(num 1732135123330) roll requested 2024-11-20T20:38:47,372 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C34897%2C1732135101148.1732135127372 2024-11-20T20:38:47,378 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 newFile=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135127372 2024-11-20T20:38:47,378 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,378 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,378 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,379 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135127372 2024-11-20T20:38:47,379 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:47,379 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1325861705-172.17.0.2-1732135098701:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
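Note: the RemoteException above ("blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION") is the NameNode refusing updateBlockForPipeline because lease recovery has already started on the old WAL's last block, so the client cannot rebuild its write pipeline. HBase then abandons the dead writer and recovers the file's lease instead, which is what the RecoverLeaseFSUtils lines that follow show. A minimal sketch of that recover-then-poll pattern against the public DistributedFileSystem API; the timeout and the one-second poll are illustrative, not HBase's tuned schedule:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
  /** Ask the NameNode to recover the lease on a WAL file, then poll until it is closed. */
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // true means the file was already closed; false means recovery was merely started.
    boolean closed = dfs.recoverLease(wal);
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);            // illustrative poll interval
      closed = dfs.isFileClosed(wal); // the same probe RecoverLeaseFSUtils calls reflectively
    }
    return closed;
  }
}
```

Once isFileClosed returns true the old WAL is safe to archive, which matches the "Recovered lease, attempt=0 ... after 0ms" and "Archiving ..." lines just below.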
2024-11-20T20:38:47,380 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35747:35747),(127.0.0.1/127.0.0.1:37553:37553)] 2024-11-20T20:38:47,380 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 2024-11-20T20:38:47,380 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 is not closed yet, will try archiving it next time 2024-11-20T20:38:47,380 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 after 0ms 2024-11-20T20:38:47,381 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.1732135123330 to hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/oldWALs/d7d59561a64e%2C34897%2C1732135101148.1732135123330 2024-11-20T20:38:47,397 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e/.tmp/info/e9ddbe52d4f642838e7786940449882b is 1080, key is row1002/info:/1732135112418/Put/seqid=0 2024-11-20T20:38:47,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741841_1024 (size=9270) 2024-11-20T20:38:47,403 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e/.tmp/info/e9ddbe52d4f642838e7786940449882b 2024-11-20T20:38:47,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741841_1024 (size=9270) 2024-11-20T20:38:47,409 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e/.tmp/info/e9ddbe52d4f642838e7786940449882b as hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e/info/e9ddbe52d4f642838e7786940449882b 2024-11-20T20:38:47,416 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e/info/e9ddbe52d4f642838e7786940449882b, entries=4, sequenceid=8, filesize=9.1 K 2024-11-20T20:38:47,417 INFO [Time-limited test 
{}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 37bbdad65fb0e940db71ac3cc8bb993e in 47ms, sequenceid=8, compaction requested=false 2024-11-20T20:38:47,417 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 37bbdad65fb0e940db71ac3cc8bb993e: 2024-11-20T20:38:47,417 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-20T20:38:47,418 ERROR [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3-prefix:d7d59561a64e,34897,1732135101148.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:47,418 WARN [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3-prefix:d7d59561a64e,34897,1732135101148.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
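Note: "All datanodes [...] are bad. Aborting..." means handleBadDatanode found no surviving replica to rebuild the pipeline around; with only two datanodes in this mini cluster there is often no replacement candidate left. The client-side knobs that govern this path are the replace-datanode-on-failure settings; the values below are illustrative, not what this test ran with:

```java
import org.apache.hadoop.conf.Configuration;

final class PipelineRecoveryConfSketch {
  // Illustrative HDFS client settings influencing the recovery path above.
  static Configuration clientConf() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // best-effort keeps writing with the surviving datanodes instead of aborting
    // with "All datanodes ... are bad" when no replacement can be found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```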
2024-11-20T20:38:47,418 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C34897%2C1732135101148.meta:.meta(num 1732135102125) roll requested 2024-11-20T20:38:47,418 INFO [regionserver/d7d59561a64e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C34897%2C1732135101148.meta.1732135127418.meta 2024-11-20T20:38:47,424 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,424 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,424 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,424 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,424 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,425 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135127418.meta 2024-11-20T20:38:47,425 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:47,425 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
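Note: the roll requested above is the WAL's standard escape from a dead pipeline: close the old writer (non-fatal if its trailer cannot be written, as logged) and open a new writer on a fresh block and pipeline. A sketch of the core call; forcing the roll here is an assumption, not taken from the test:

```java
import org.apache.hadoop.hbase.wal.WAL;

final class WalRollSketch {
  // Illustrative core of the roll AbstractWALRoller performs above: closing
  // the old writer can fail when its pipeline is already dead (hence the
  // "close old writer failed" WARN), but the new writer still comes up.
  static void rollAfterSyncFailure(WAL wal) throws Exception {
    wal.rollWriter(true); // force a roll even if the WAL has no new entries
  }
}
```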
2024-11-20T20:38:47,425 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta 2024-11-20T20:38:47,426 WARN [IPC Server handler 4 on default port 38981 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1014 2024-11-20T20:38:47,426 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta after 1ms 2024-11-20T20:38:47,426 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35747:35747),(127.0.0.1/127.0.0.1:37553:37553)] 2024-11-20T20:38:47,426 DEBUG [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta is not closed yet, will try archiving it next time 2024-11-20T20:38:47,447 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/info/a10c64af4fb14c49998d3abe009a759e is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e./info:regioninfo/1732135102663/Put/seqid=0 2024-11-20T20:38:47,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741843_1027 (size=7125) 2024-11-20T20:38:47,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741843_1027 (size=7125) 2024-11-20T20:38:47,454 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/info/a10c64af4fb14c49998d3abe009a759e 2024-11-20T20:38:47,476 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/ns/edcdcc8759d4417a80dcf5f2355f817e is 43, key is default/ns:d/1732135102206/Put/seqid=0 2024-11-20T20:38:47,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:47,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741844_1028 (size=5153) 2024-11-20T20:38:47,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741844_1028 (size=5153) 2024-11-20T20:38:47,482 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/ns/edcdcc8759d4417a80dcf5f2355f817e 2024-11-20T20:38:47,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:47,502 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/table/ffc17fe7cb2e4d97a0fe11ceae4feacc is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732135102675/Put/seqid=0 2024-11-20T20:38:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741845_1029 (size=5438) 2024-11-20T20:38:47,507 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/table/ffc17fe7cb2e4d97a0fe11ceae4feacc 2024-11-20T20:38:47,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741845_1029 (size=5438) 2024-11-20T20:38:47,513 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/info/a10c64af4fb14c49998d3abe009a759e as hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/info/a10c64af4fb14c49998d3abe009a759e 2024-11-20T20:38:47,520 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/info/a10c64af4fb14c49998d3abe009a759e, entries=10, sequenceid=11, filesize=7.0 K 2024-11-20T20:38:47,521 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/ns/edcdcc8759d4417a80dcf5f2355f817e as hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/ns/edcdcc8759d4417a80dcf5f2355f817e 2024-11-20T20:38:47,527 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/ns/edcdcc8759d4417a80dcf5f2355f817e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T20:38:47,528 DEBUG 
[Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/.tmp/table/ffc17fe7cb2e4d97a0fe11ceae4feacc as hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/table/ffc17fe7cb2e4d97a0fe11ceae4feacc 2024-11-20T20:38:47,534 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/table/ffc17fe7cb2e4d97a0fe11ceae4feacc, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T20:38:47,536 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false 2024-11-20T20:38:47,536 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T20:38:47,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T20:38:47,542 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T20:38:47,542 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at 
org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:38:47,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:38:47,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:38:47,542 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T20:38:47,542 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T20:38:47,542 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1047032065, stopped=false 2024-11-20T20:38:47,542 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d7d59561a64e,38997,1732135100948 2024-11-20T20:38:47,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T20:38:47,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T20:38:47,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:47,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:47,634 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T20:38:47,634 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T20:38:47,634 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:38:47,634 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:38:47,634 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:38:47,635 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd7d59561a64e,34897,1732135101148' ***** 2024-11-20T20:38:47,635 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T20:38:47,635 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:38:47,635 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T20:38:47,635 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T20:38:47,635 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T20:38:47,635 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T20:38:47,635 INFO [RS:0;d7d59561a64e:34897 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T20:38:47,635 INFO [RS:0;d7d59561a64e:34897 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T20:38:47,635 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(3091): Received CLOSE for 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:47,635 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(959): stopping server d7d59561a64e,34897,1732135101148 2024-11-20T20:38:47,635 INFO [RS:0;d7d59561a64e:34897 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T20:38:47,636 INFO [RS:0;d7d59561a64e:34897 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d7d59561a64e:34897. 
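Note: everything from "Shutting down minicluster" onward is driven by the test's @After hook, per the earlier call stacks (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster). A bare-bones sketch of that hook; the class name is illustrative:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  // Mirrors the teardown path in the call stacks above: close the shared
  // async connection, stop the HBase mini cluster (master + region servers),
  // then the backing mini DFS cluster.
  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster(); // drives the STOPPING/STOPPED events seen here
  }
}
```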
2024-11-20T20:38:47,636 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 37bbdad65fb0e940db71ac3cc8bb993e, disabling compactions & flushes 2024-11-20T20:38:47,636 DEBUG [RS:0;d7d59561a64e:34897 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:38:47,636 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:47,636 DEBUG [RS:0;d7d59561a64e:34897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:38:47,636 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:47,636 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. after waiting 0 ms 2024-11-20T20:38:47,636 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T20:38:47,636 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:47,636 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T20:38:47,636 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T20:38:47,636 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T20:38:47,636 INFO [regionserver/d7d59561a64e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:38:47,636 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T20:38:47,636 DEBUG [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1325): Online Regions={37bbdad65fb0e940db71ac3cc8bb993e=TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T20:38:47,636 DEBUG [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 37bbdad65fb0e940db71ac3cc8bb993e 2024-11-20T20:38:47,636 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T20:38:47,637 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T20:38:47,637 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T20:38:47,637 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T20:38:47,637 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T20:38:47,641 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/default/TestLogRolling-testLogRollOnPipelineRestart/37bbdad65fb0e940db71ac3cc8bb993e/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-20T20:38:47,641 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T20:38:47,642 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 
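Note: the "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" WARNs that recur below (against NameNode port 44749, a different mini DFS than the 38981 instance) come from Close-WAL-Writer-0 retrying lease recovery through a DFSClient whose FileSystem was already closed, so DFSClient.checkOpen rejects every call. A minimal reproduction of that cause chain; the URI and path are stand-ins:

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws IOException {
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:44749"), new Configuration());
    dfs.close(); // what the earlier cluster shutdown did to this client
    try {
      dfs.isFileClosed(new Path("/user/jenkins/test-data/example.wal")); // illustrative path
    } catch (IOException e) {
      System.out.println(e.getMessage()); // "Filesystem closed", the root cause in every WARN below
    }
  }
}
```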
2024-11-20T20:38:47,642 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T20:38:47,642 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 37bbdad65fb0e940db71ac3cc8bb993e: Waiting for close lock at 1732135127636; Running coprocessor pre-close hooks at 1732135127636; Disabling compacts and flushes for region at 1732135127636; Disabling writes for close at 1732135127636; Writing region close event to WAL at 1732135127637 (+1 ms); Running coprocessor post-close hooks at 1732135127642 (+5 ms); Closed at 1732135127642 2024-11-20T20:38:47,642 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T20:38:47,642 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135127636; Running coprocessor pre-close hooks at 1732135127636; Disabling compacts and flushes for region at 1732135127636; Disabling writes for close at 1732135127637 (+1 ms); Writing region close event to WAL at 1732135127638 (+1 ms); Running coprocessor post-close hooks at 1732135127642 (+4 ms); Closed at 1732135127642 2024-11-20T20:38:47,642 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T20:38:47,642 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732135102296.37bbdad65fb0e940db71ac3cc8bb993e. 2024-11-20T20:38:47,837 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(976): stopping server d7d59561a64e,34897,1732135101148; all regions closed. 2024-11-20T20:38:47,837 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,837 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,838 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,838 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,838 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:47,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741842_1025 (size=825) 2024-11-20T20:38:47,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741842_1025 (size=825) 2024-11-20T20:38:48,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:48,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:48,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:48,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:49,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:49,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:49,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:49,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:49,786 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T20:38:50,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:50,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:50,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:50,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:50,928 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T20:38:51,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:51,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:51,427 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta after 4002ms 2024-11-20T20:38:51,427 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/WALs/d7d59561a64e,34897,1732135101148/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta to hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/oldWALs/d7d59561a64e%2C34897%2C1732135101148.meta.1732135102125.meta 2024-11-20T20:38:51,431 DEBUG [RS:0;d7d59561a64e:34897 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/oldWALs 2024-11-20T20:38:51,432 INFO [RS:0;d7d59561a64e:34897 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C34897%2C1732135101148.meta:.meta(num 1732135127418) 2024-11-20T20:38:51,432 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,432 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,433 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,433 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741840_1023 (size=1162) 2024-11-20T20:38:51,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741840_1023 (size=1162) 2024-11-20T20:38:51,443 DEBUG [RS:0;d7d59561a64e:34897 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/oldWALs 2024-11-20T20:38:51,443 INFO [RS:0;d7d59561a64e:34897 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C34897%2C1732135101148:(num 1732135127372) 2024-11-20T20:38:51,443 DEBUG [RS:0;d7d59561a64e:34897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:38:51,443 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:38:51,443 INFO [RS:0;d7d59561a64e:34897 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:38:51,443 INFO [RS:0;d7d59561a64e:34897 {}] hbase.ChoreService(370): Chore service for: regionserver/d7d59561a64e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T20:38:51,444 INFO [RS:0;d7d59561a64e:34897 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:38:51,444 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T20:38:51,444 INFO [RS:0;d7d59561a64e:34897 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34897 2024-11-20T20:38:51,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:51,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:38:51,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d7d59561a64e,34897,1732135101148 2024-11-20T20:38:51,497 INFO [RS:0;d7d59561a64e:34897 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:38:51,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:51,507 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d7d59561a64e,34897,1732135101148] 2024-11-20T20:38:51,518 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d7d59561a64e,34897,1732135101148 already deleted, retry=false 2024-11-20T20:38:51,518 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d7d59561a64e,34897,1732135101148 expired; onlineServers=0 2024-11-20T20:38:51,518 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd7d59561a64e,38997,1732135100948' ***** 2024-11-20T20:38:51,518 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T20:38:51,518 INFO [M:0;d7d59561a64e:38997 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T20:38:51,518 INFO [M:0;d7d59561a64e:38997 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:38:51,518 DEBUG [M:0;d7d59561a64e:38997 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T20:38:51,518 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T20:38:51,518 DEBUG [M:0;d7d59561a64e:38997 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T20:38:51,518 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135101499 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135101499,5,FailOnTimeoutGroup] 2024-11-20T20:38:51,519 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135101498 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135101498,5,FailOnTimeoutGroup] 2024-11-20T20:38:51,519 INFO [M:0;d7d59561a64e:38997 {}] hbase.ChoreService(370): Chore service for: master/d7d59561a64e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T20:38:51,519 INFO [M:0;d7d59561a64e:38997 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:38:51,519 DEBUG [M:0;d7d59561a64e:38997 {}] master.HMaster(1795): Stopping service threads 2024-11-20T20:38:51,519 INFO [M:0;d7d59561a64e:38997 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T20:38:51,519 INFO [M:0;d7d59561a64e:38997 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T20:38:51,519 INFO [M:0;d7d59561a64e:38997 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T20:38:51,520 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T20:38:51,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T20:38:51,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:51,528 DEBUG [M:0;d7d59561a64e:38997 {}] zookeeper.ZKUtil(347): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T20:38:51,529 WARN [M:0;d7d59561a64e:38997 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T20:38:51,529 INFO [M:0;d7d59561a64e:38997 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/.lastflushedseqids 2024-11-20T20:38:51,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741846_1030 (size=111) 2024-11-20T20:38:51,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741846_1030 (size=111) 2024-11-20T20:38:51,537 INFO [M:0;d7d59561a64e:38997 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T20:38:51,537 INFO [M:0;d7d59561a64e:38997 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T20:38:51,537 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T20:38:51,537 INFO [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:38:51,537 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:38:51,537 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T20:38:51,537 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:38:51,537 INFO [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-20T20:38:51,538 ERROR [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData-prefix:d7d59561a64e,38997,1732135100948 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:51,538 WARN [FSHLog-0-hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData-prefix:d7d59561a64e,38997,1732135100948 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T20:38:51,538 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog d7d59561a64e%2C38997%2C1732135100948:(num 1732135101288) roll requested 2024-11-20T20:38:51,538 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C38997%2C1732135100948.1732135131538 2024-11-20T20:38:51,544 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,544 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,544 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,544 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,545 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:38:51,545 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135131538 2024-11-20T20:38:51,545 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T20:38:51,545 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37297,DS-ff16a4e6-a07e-4e1f-8826-71ca64cbb46b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T20:38:51,545 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288 2024-11-20T20:38:51,546 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37553:37553),(127.0.0.1/127.0.0.1:35747:35747)] 2024-11-20T20:38:51,546 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288 is not closed yet, will try archiving it next time 2024-11-20T20:38:51,546 WARN [IPC Server handler 3 on default port 38981 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-20T20:38:51,546 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288 after 1ms 2024-11-20T20:38:51,568 DEBUG [M:0;d7d59561a64e:38997 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6e34f0c19c4f4a95b4a37a6a78582934 is 82, key is hbase:meta,,1/info:regioninfo/1732135102154/Put/seqid=0 2024-11-20T20:38:51,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741848_1033 (size=5672) 2024-11-20T20:38:51,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741848_1033 (size=5672) 2024-11-20T20:38:51,574 INFO [M:0;d7d59561a64e:38997 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6e34f0c19c4f4a95b4a37a6a78582934 2024-11-20T20:38:51,594 DEBUG [M:0;d7d59561a64e:38997 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b9c196a51fff47fba3baee8e42b17f01 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732135102680/Put/seqid=0 2024-11-20T20:38:51,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741849_1034 (size=6119) 2024-11-20T20:38:51,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741849_1034 (size=6119) 2024-11-20T20:38:51,600 INFO [M:0;d7d59561a64e:38997 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b9c196a51fff47fba3baee8e42b17f01 2024-11-20T20:38:51,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:38:51,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34897-0x10159f81dae0001, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:38:51,607 INFO [RS:0;d7d59561a64e:34897 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:38:51,607 INFO [RS:0;d7d59561a64e:34897 {}] regionserver.HRegionServer(1031): Exiting; stopping=d7d59561a64e,34897,1732135101148; zookeeper connection closed. 2024-11-20T20:38:51,608 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2e14be79 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2e14be79 2024-11-20T20:38:51,608 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T20:38:51,622 DEBUG [M:0;d7d59561a64e:38997 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/06d80a81451b40d3854e5d137761a23b is 69, key is d7d59561a64e,34897,1732135101148/rs:state/1732135101612/Put/seqid=0 2024-11-20T20:38:51,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741850_1035 (size=5156) 2024-11-20T20:38:51,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741850_1035 (size=5156) 2024-11-20T20:38:51,627 INFO [M:0;d7d59561a64e:38997 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/06d80a81451b40d3854e5d137761a23b 2024-11-20T20:38:51,649 DEBUG [M:0;d7d59561a64e:38997 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a8c74f4560a43dea446307a48535bb1 is 52, key is load_balancer_on/state:d/1732135102291/Put/seqid=0 2024-11-20T20:38:51,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741851_1036 (size=5056) 2024-11-20T20:38:51,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741851_1036 (size=5056) 2024-11-20T20:38:51,658 INFO [M:0;d7d59561a64e:38997 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a8c74f4560a43dea446307a48535bb1 2024-11-20T20:38:51,666 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6e34f0c19c4f4a95b4a37a6a78582934 as hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6e34f0c19c4f4a95b4a37a6a78582934 2024-11-20T20:38:51,671 INFO [M:0;d7d59561a64e:38997 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6e34f0c19c4f4a95b4a37a6a78582934, entries=8, sequenceid=56, filesize=5.5 K 2024-11-20T20:38:51,672 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b9c196a51fff47fba3baee8e42b17f01 as hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b9c196a51fff47fba3baee8e42b17f01 2024-11-20T20:38:51,678 INFO [M:0;d7d59561a64e:38997 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b9c196a51fff47fba3baee8e42b17f01, entries=6, sequenceid=56, filesize=6.0 K 2024-11-20T20:38:51,678 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/06d80a81451b40d3854e5d137761a23b as hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/06d80a81451b40d3854e5d137761a23b 2024-11-20T20:38:51,684 INFO [M:0;d7d59561a64e:38997 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/06d80a81451b40d3854e5d137761a23b, entries=1, sequenceid=56, filesize=5.0 K 2024-11-20T20:38:51,685 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a8c74f4560a43dea446307a48535bb1 as hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5a8c74f4560a43dea446307a48535bb1 2024-11-20T20:38:51,691 INFO [M:0;d7d59561a64e:38997 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5a8c74f4560a43dea446307a48535bb1, entries=1, sequenceid=56, filesize=4.9 K 2024-11-20T20:38:51,692 INFO [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=56, compaction requested=false 2024-11-20T20:38:51,693 INFO [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T20:38:51,693 DEBUG [M:0;d7d59561a64e:38997 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1732135131537
    Disabling compacts and flushes for region at 1732135131537
    Disabling writes for close at 1732135131537
    Obtaining lock to block concurrent updates at 1732135131537
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732135131537
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732135131538 (+1 ms)
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732135131547 (+9 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732135131547
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732135131567 (+20 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732135131567
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732135131580 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732135131594 (+14 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732135131594
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732135131605 (+11 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732135131621 (+16 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732135131621
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732135131632 (+11 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732135131648 (+16 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732135131648
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30c62190: reopening flushed file at 1732135131665 (+17 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2483c60: reopening flushed file at 1732135131671 (+6 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ef842d7: reopening flushed file at 1732135131678 (+7 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10188b07: reopening flushed file at 1732135131684 (+6 ms)
    Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=56, compaction requested=false at 1732135131692 (+8 ms)
    Writing region close event to WAL at 1732135131693 (+1 ms)
    Closed at 1732135131693
2024-11-20T20:38:51,694 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:51,694 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:51,694 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:51,694 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:51,694 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:38:51,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46429 is added to blk_1073741847_1031 (size=757)
2024-11-20T20:38:51,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33795 is added to blk_1073741847_1031 (size=757)
2024-11-20T20:38:52,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:52,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:52,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:52,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:52,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:38:53,177 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T20:38:55,547 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288 after 4002ms
2024-11-20T20:38:55,548 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/WALs/d7d59561a64e,38997,1732135100948/d7d59561a64e%2C38997%2C1732135100948.1732135101288 to hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/oldWALs/d7d59561a64e%2C38997%2C1732135100948.1732135101288
2024-11-20T20:38:55,552 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/MasterData/oldWALs/d7d59561a64e%2C38997%2C1732135100948.1732135101288 to hdfs://localhost:38981/user/jenkins/test-data/7feb90c4-e335-f0a6-c92d-b9cb604716e3/oldWALs/d7d59561a64e%2C38997%2C1732135100948.1732135101288$masterlocalwal$
2024-11-20T20:38:55,552 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-20T20:38:55,552 INFO [M:0;d7d59561a64e:38997 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
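Note on the "Failed invocation" WARNs above: RecoverLeaseFSUtils probes whether a WAL file is already closed by calling DFSClient's isFileClosed reflectively, so once the underlying DFSClient has been shut down, the IOException ("Filesystem closed") surfaces wrapped in an InvocationTargetException whose own message is null. That is why each trace begins with "java.lang.reflect.InvocationTargetException: null" and carries the real error only as the cause. A minimal, self-contained sketch of that wrapping behavior (illustrative only; the demo class and method below are invented, not HBase code):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveFailureDemo {
    // Stand-in for DFSClient.isFileClosed(): fails the same way checkOpen()
    // does once the client has been closed.
    static boolean isFileClosed(String path) throws java.io.IOException {
        throw new java.io.IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        Method m = ReflectiveFailureDemo.class.getDeclaredMethod("isFileClosed", String.class);
        try {
            m.invoke(null, "/some/wal");
        } catch (InvocationTargetException e) {
            // invoke() wraps the target's exception; the wrapper's own message is null.
            System.out.println("wrapper message: " + e.getMessage()); // prints: null
            System.out.println("real cause: " + e.getCause());        // java.io.IOException: Filesystem closed
        }
    }
}

Run as-is, this prints a null wrapper message followed by the IOException cause, matching the shape of the stack traces above.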
2024-11-20T20:38:55,552 INFO [M:0;d7d59561a64e:38997 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38997
2024-11-20T20:38:55,552 INFO [M:0;d7d59561a64e:38997 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-20T20:38:55,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T20:38:55,723 INFO [M:0;d7d59561a64e:38997 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-20T20:38:55,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38997-0x10159f81dae0000, quorum=127.0.0.1:59087, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T20:38:55,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@478c28d0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:55,762 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@8cbc01a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:38:55,762 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:38:55,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f2d69a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:38:55,763 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a0c1f66{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,STOPPED}
2024-11-20T20:38:55,764 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:38:55,764 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:38:55,764 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:38:55,764 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1325861705-172.17.0.2-1732135098701 (Datanode Uuid 9d803552-fc36-44b4-af02-b5fdff5dea2b) service to localhost/127.0.0.1:38981
2024-11-20T20:38:55,766 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data3/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:55,766 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data4/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:55,766 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:38:55,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10e0b03{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:55,770 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c894b0d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:38:55,770 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:38:55,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@686dd8bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:38:55,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48f4fa87{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,STOPPED}
2024-11-20T20:38:55,771 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:38:55,771 WARN [BP-1325861705-172.17.0.2-1732135098701 heartbeating to localhost/127.0.0.1:38981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1325861705-172.17.0.2-1732135098701 (Datanode Uuid ce80fef2-4f68-4977-8d41-3f9d66556ca9) service to localhost/127.0.0.1:38981
2024-11-20T20:38:55,772 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:38:55,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data1/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:55,772 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:38:55,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/cluster_d9d8dbdd-47f6-9fc1-e039-bfc9409ca8df/data/data2/current/BP-1325861705-172.17.0.2-1732135098701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:38:55,773 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:38:55,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@585ec7cf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:38:55,779 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27bc6094{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:38:55,779 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:38:55,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@209ebed6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:38:55,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@115eaf6c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir/,STOPPED}
2024-11-20T20:38:55,786 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-20T20:38:55,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-20T20:38:55,815 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=185 (was 156)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38981
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38981 from jenkins.hfs.4
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-31-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38981
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38981 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-10-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:38981
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38981 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-33-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:38981
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38981
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 429) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=280 (was 305), ProcessCount=11 (was 11), AvailableMemoryMB=891 (was 1062) 2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=185, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=280, ProcessCount=11, AvailableMemoryMB=891 2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir so I do NOT create it in target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4 2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.tmp.dir so I do NOT create it in target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4 2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048, deleteOnExit=true 2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/test.cache.data in system properties and HBase conf 2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir in system properties and HBase conf 2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T20:38:55,825 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system 
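The ResourceChecker accounting above works by snapshotting process-level counters before each test and diffing them afterwards; any thread still alive after the test (here mostly Netty event loops parked in SingleThreadEventExecutor.confirmShutdown, which keeps a loop alive for a quiet period after a graceful shutdown) shows up in the "(was N)" diff and gets flagged as a potential leak. Below is a minimal sketch of that before/after pattern using only the JDK's ThreadMXBean; ResourceSnapshot, capture, and reportLeaks are hypothetical names for illustration, not the HBase ResourceChecker API:

```java
// Minimal sketch of a before/after resource-leak check in the style of the
// hbase.ResourceChecker records above. Names here are hypothetical.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

final class ResourceSnapshot {
    final int threadCount;

    private ResourceSnapshot(int threadCount) {
        this.threadCount = threadCount;
    }

    // Capture the current live-thread count, as a "before:" record does.
    static ResourceSnapshot capture() {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        return new ResourceSnapshot(threads.getThreadCount());
    }

    // Compare an "after:" snapshot against this "before:" one and print a
    // diff in the same "Thread=N (was M) - Thread LEAK?" shape as the log.
    void reportLeaks(ResourceSnapshot after) {
        if (after.threadCount > this.threadCount) {
            System.out.printf("Thread=%d (was %d) - Thread LEAK?%n",
                after.threadCount, this.threadCount);
        }
    }
}
```

A checker that samples immediately after the test will still count event-loop threads that are merely draining their shutdown quiet period, which is why entries like nioEventLoopGroup-31-2 above are reported only as "potentially" hanging.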
2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=185, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=280, ProcessCount=11, AvailableMemoryMB=891
2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.log.dir so I do NOT create it in target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4
2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c8b59988-4e85-d070-755f-ac170b8e2648/hadoop.tmp.dir so I do NOT create it in target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4
2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048, deleteOnExit=true
2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/test.cache.data in system properties and HBase conf
2024-11-20T20:38:55,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.tmp.dir in system properties and HBase conf
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir in system properties and HBase conf
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-20T20:38:55,825 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:38:55,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/nfs.dump.dir in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/java.io.tmpdir in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-20T20:38:55,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-20T20:38:55,839 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:38:56,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:56,230 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:56,235 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:38:56,236 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:38:56,236 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:38:56,236 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:38:56,237 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:56,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ff7df62{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:38:56,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@568dc701{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:38:56,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f033f5b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/java.io.tmpdir/jetty-localhost-34441-hadoop-hdfs-3_4_1-tests_jar-_-any-1604991175683227267/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:38:56,340 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26bf8cb0{HTTP/1.1, (http/1.1)}{localhost:34441}
2024-11-20T20:38:56,340 INFO [Time-limited test {}] server.Server(415): Started @210047ms
2024-11-20T20:38:56,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:56,353 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:38:56,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
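Every "Failed invocation" warning above comes from the same mechanism: while closing a WAL writer, RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively, and because the test's DFSClient has already been shut down, the probe throws IOException("Filesystem closed"), which reflection surfaces wrapped in an InvocationTargetException. A simplified sketch of such a reflective probe follows; the real RecoverLeaseFSUtils caches the looked-up Method and retries lease recovery on a schedule, so this is an illustration of the pattern, not its exact code:

```java
// Simplified sketch of the reflective isFileClosed probe visible in the
// RecoverLeaseFSUtils frames above. Not the exact HBase implementation.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
    /**
     * Returns true if the filesystem reports the file's lease as closed.
     * isFileClosed is looked up reflectively because it exists on
     * DistributedFileSystem but not on the generic FileSystem contract.
     */
    static boolean isFileClosed(FileSystem fs, Path p) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException e) {
            return false; // not a DistributedFileSystem; nothing to probe
        } catch (IllegalAccessException | InvocationTargetException e) {
            // A closed DFSClient surfaces here as InvocationTargetException
            // caused by IOException("Filesystem closed"), as logged above.
            return false;
        }
    }
}
```

Because the wrapped cause is only a closed client rather than data loss, the caller logs the failure at WARN and keeps going, which is why these traces repeat harmlessly throughout the shutdown of the previous test's cluster.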
2024-11-20T20:38:56,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:38:56,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-20T20:38:56,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-20T20:38:56,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-20T20:38:56,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:56,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:56,584 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:38:56,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:38:56,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:38:56,585 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:38:56,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a64c23e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:38:56,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60881688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:38:56,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59c2bf8c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/java.io.tmpdir/jetty-localhost-44291-hadoop-hdfs-3_4_1-tests_jar-_-any-11469684147554690844/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:56,686 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3cd873c{HTTP/1.1, (http/1.1)}{localhost:44291}
2024-11-20T20:38:56,686 INFO [Time-limited test {}] server.Server(415): Started @210392ms
2024-11-20T20:38:56,687 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:38:56,712 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:38:56,715 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:38:56,716 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:38:56,716 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:38:56,716 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:38:56,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@755885a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:38:56,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fdf76c0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:38:56,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e17b3bd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/java.io.tmpdir/jetty-localhost-45611-hadoop-hdfs-3_4_1-tests_jar-_-any-12774455372978273292/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:38:56,822 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6677bb9f{HTTP/1.1, (http/1.1)}{localhost:45611}
2024-11-20T20:38:56,822 INFO [Time-limited test {}] server.Server(415): Started @210529ms
2024-11-20T20:38:56,824 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
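The jetty-9.4.53 banner, ServletContextHandler, and ServerConnector records above are the embedded web UIs that the NameNode and each DataNode in the minicluster bring up. A minimal Jetty 9 sketch of that same startup sequence is below; the context path and wiring here are illustrative examples, not the actual Hadoop HTTP server setup:

```java
// Minimal Jetty 9 sketch of the Server / connector / context startup
// sequence logged above. Paths and ports are arbitrary examples.
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;

public final class EmbeddedHttpServer {
    public static void main(String[] args) throws Exception {
        Server server = new Server();

        // "Started ServerConnector@...{HTTP/1.1}{localhost:port}" corresponds
        // to a connector like this one being opened.
        ServerConnector connector = new ServerConnector(server);
        connector.setHost("localhost");
        connector.setPort(0); // 0 = pick a free ephemeral port, as tests do
        server.addConnector(connector);

        // "Started o.e.j.s.ServletContextHandler{logs,/logs,...}" corresponds
        // to a context handler mounted at a path.
        ServletContextHandler logs = new ServletContextHandler();
        logs.setContextPath("/logs");
        server.setHandler(logs);

        server.start(); // emits the "jetty-9.4.x; built: ..." banner
        System.out.println("listening on port " + connector.getLocalPort());
        server.join();
    }
}
```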
2024-11-20T20:38:57,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:57,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:57,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:57,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:58,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:58,185 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/data/data1/current/BP-1289768345-172.17.0.2-1732135135850/current, will proceed with Du for space computation calculation,
2024-11-20T20:38:58,185 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/data/data2/current/BP-1289768345-172.17.0.2-1732135135850/current, will proceed with Du for space computation calculation,
2024-11-20T20:38:58,205 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:38:58,207 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x312db27b67426a8e with lease ID 0x7e79d025f4616924: Processing first storage report for DS-510671da-f753-4f4c-a178-b665de0a08fc from datanode DatanodeRegistration(127.0.0.1:41943, datanodeUuid=2760e3b2-f873-46ef-a501-75e5ae179e75, infoPort=40283, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=1087700635;c=1732135135850)
2024-11-20T20:38:58,207 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x312db27b67426a8e with lease ID 0x7e79d025f4616924: from storage DS-510671da-f753-4f4c-a178-b665de0a08fc node DatanodeRegistration(127.0.0.1:41943, datanodeUuid=2760e3b2-f873-46ef-a501-75e5ae179e75, infoPort=40283, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=1087700635;c=1732135135850), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:38:58,207 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x312db27b67426a8e with lease ID 0x7e79d025f4616924: Processing first storage report for DS-00afc5ff-f748-40a1-b613-db7cbba979db from datanode DatanodeRegistration(127.0.0.1:41943, datanodeUuid=2760e3b2-f873-46ef-a501-75e5ae179e75, infoPort=40283, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=1087700635;c=1732135135850)
2024-11-20T20:38:58,207 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x312db27b67426a8e with lease ID 0x7e79d025f4616924: from storage DS-00afc5ff-f748-40a1-b613-db7cbba979db node DatanodeRegistration(127.0.0.1:41943, datanodeUuid=2760e3b2-f873-46ef-a501-75e5ae179e75, infoPort=40283, infoSecurePort=0, ipcPort=38785, storageInfo=lv=-57;cid=testClusterID;nsid=1087700635;c=1732135135850), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:38:58,323 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/data/data3/current/BP-1289768345-172.17.0.2-1732135135850/current, will proceed with Du for space computation calculation,
2024-11-20T20:38:58,323 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/data/data4/current/BP-1289768345-172.17.0.2-1732135135850/current, will proceed with Du for space computation calculation,
2024-11-20T20:38:58,341 WARN [Thread-1641 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:38:58,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:38:58,343 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ac65fa3e0e68c37 with lease ID 0x7e79d025f4616925: Processing first storage report for DS-95e32ad8-27d2-4dd1-94dd-a3daaf2158bb from datanode DatanodeRegistration(127.0.0.1:40109, datanodeUuid=c5a7bb34-d76f-4475-8004-63606f7ef063, infoPort=44985, infoSecurePort=0, ipcPort=42971, storageInfo=lv=-57;cid=testClusterID;nsid=1087700635;c=1732135135850)
2024-11-20T20:38:58,343 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ac65fa3e0e68c37 with lease ID 0x7e79d025f4616925: from storage DS-95e32ad8-27d2-4dd1-94dd-a3daaf2158bb node DatanodeRegistration(127.0.0.1:40109, datanodeUuid=c5a7bb34-d76f-4475-8004-63606f7ef063, infoPort=44985, infoSecurePort=0, ipcPort=42971, storageInfo=lv=-57;cid=testClusterID;nsid=1087700635;c=1732135135850), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:38:58,343 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ac65fa3e0e68c37 with lease ID 0x7e79d025f4616925: Processing first storage report for DS-23cb93d6-8662-49e3-9250-c75fbaf535ca from datanode DatanodeRegistration(127.0.0.1:40109, datanodeUuid=c5a7bb34-d76f-4475-8004-63606f7ef063, infoPort=44985, infoSecurePort=0, ipcPort=42971, storageInfo=lv=-57;cid=testClusterID;nsid=1087700635;c=1732135135850)
2024-11-20T20:38:58,343 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ac65fa3e0e68c37 with lease ID 0x7e79d025f4616925: from storage DS-23cb93d6-8662-49e3-9250-c75fbaf535ca node DatanodeRegistration(127.0.0.1:40109, datanodeUuid=c5a7bb34-d76f-4475-8004-63606f7ef063, infoPort=44985, infoSecurePort=0, ipcPort=42971, storageInfo=lv=-57;cid=testClusterID;nsid=1087700635;c=1732135135850), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:38:58,358 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4
2024-11-20T20:38:58,361 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/zookeeper_0, clientPort=64569, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-20T20:38:58,361 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64569
2024-11-20T20:38:58,362 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
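The MiniZooKeeperCluster and DFS records above are emitted while HBaseTestingUtil assembles the minicluster described by the StartMiniClusterOption logged at the start of this test. A hedged sketch of the test-side calls that drive this sequence follows; the class names are taken directly from the log, but the exact builder and method names may differ between HBase versions, so treat this as an outline rather than verified API:

```java
// Hedged sketch of driving the minicluster startup logged above.
// HBaseTestingUtil / StartMiniClusterOption are the classes named in the
// log; builder/method names here are best-effort, not verified API.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public final class MiniClusterExample {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
        // numDataNodes=2, numZkServers=1} from the log.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);  // starts DFS, ZooKeeper, master, RS
        try {
            // ... run test logic against the minicluster ...
        } finally {
            // Shutting down closes the DFSClient, which is what turns later
            // WAL lease-recovery probes into "Filesystem closed" warnings.
            util.shutdownMiniCluster();
        }
    }
}
```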
2024-11-20T20:38:58,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:38:58,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:38:58,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:38:58,376 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f with version=8
2024-11-20T20:38:58,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase-staging
2024-11-20T20:38:58,378 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:38:58,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:38:58,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:38:58,378 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:38:58,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:38:58,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:38:58,378 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-20T20:38:58,379 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:38:58,379 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46491
2024-11-20T20:38:58,381 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46491 connecting to ZooKeeper ensemble=127.0.0.1:64569
2024-11-20T20:38:58,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:464910x0, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:38:58,431 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46491-0x10159f8afe30000 connected
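The RpcExecutor records above describe the master's call queues: bounded FIFO LinkedBlockingQueues (maxQueueLength=30) drained by a fixed number of handler threads (handlerCount=3), with separate queues for default, priority read/write, replication, and meta-priority traffic. Below is a generic JDK sketch of that bounded-call-queue pattern; CallQueue and the thread names are hypothetical illustrations, not HBase's RpcExecutor:

```java
// Generic JDK sketch of the bounded-FIFO call-queue pattern the RpcExecutor
// lines describe (queueClass=LinkedBlockingQueue, maxQueueLength=30,
// handlerCount=3). CallQueue is a hypothetical name.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

final class CallQueue {
    // maxQueueLength=30: offers beyond capacity fail fast instead of
    // blocking, which is how an RPC server sheds load under pressure.
    private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(30);

    boolean dispatch(Runnable call) {
        return queue.offer(call); // false => caller should reject: queue full
    }

    // handlerCount=3: three handler threads looping on take().
    void startHandlers(int handlerCount) {
        for (int i = 0; i < handlerCount; i++) {
            Thread handler = new Thread(() -> {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        queue.take().run(); // FIFO service order
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }, "handler-" + i);
            handler.setDaemon(true);
            handler.start();
        }
    }
}
```

Keeping reads, writes, and replication on separate queues, as the RWQueueRpcExecutor line shows, prevents one slow class of calls from starving the others.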
jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:58,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:58,507 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:58,509 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:58,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:38:58,513 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f, hbase.cluster.distributed=false 2024-11-20T20:38:58,515 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T20:38:58,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46491 2024-11-20T20:38:58,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46491 2024-11-20T20:38:58,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46491 2024-11-20T20:38:58,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46491 2024-11-20T20:38:58,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46491 2024-11-20T20:38:58,536 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45 2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with 
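The two WARNs above are benign for the test: RecoverLeaseFSUtils locates DistributedFileSystem#isFileClosed reflectively, so when the mini-cluster has already shut its DFSClient down, the reflective call surfaces as an InvocationTargetException wrapping the underlying "Filesystem closed" IOException, and the lease-recovery loop treats the failure as "not closed yet" and moves on. A minimal Java sketch of that reflective-probe pattern follows; the helper class and method names are illustrative, not the HBase source.

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      // Look up isFileClosed(Path) once; DistributedFileSystem has it, a generic FileSystem may not.
      static Method find(FileSystem fs) {
        try {
          return fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
          return null; // not a DFS: caller falls back to plain recoverLease polling
        }
      }

      // Returns false ("keep waiting") when the probe is unavailable or the invocation fails,
      // e.g. an InvocationTargetException wrapping "Filesystem closed" as in the log above.
      static boolean isFileClosed(FileSystem fs, Method probe, Path p) {
        if (probe == null) {
          return false;
        }
        try {
          return (Boolean) probe.invoke(fs, p);
        } catch (ReflectiveOperationException e) {
          return false;
        }
      }
    }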
2024-11-20T20:38:58,507 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:38:58,509 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:38:58,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:38:58,513 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f, hbase.cluster.distributed=false
2024-11-20T20:38:58,515 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:38:58,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46491
2024-11-20T20:38:58,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46491
2024-11-20T20:38:58,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46491
2024-11-20T20:38:58,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46491
2024-11-20T20:38:58,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46491
2024-11-20T20:38:58,536 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T20:38:58,536 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:38:58,537 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46743
2024-11-20T20:38:58,539 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46743 connecting to ZooKeeper ensemble=127.0.0.1:64569
2024-11-20T20:38:58,539 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:38:58,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:38:58,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467430x0, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:38:58,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:467430x0, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:38:58,549 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46743-0x10159f8afe30001 connected
2024-11-20T20:38:58,549 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-20T20:38:58,550 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-20T20:38:58,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T20:38:58,551 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:38:58,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46743
2024-11-20T20:38:58,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46743
2024-11-20T20:38:58,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46743
2024-11-20T20:38:58,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46743
2024-11-20T20:38:58,553 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46743
2024-11-20T20:38:58,565 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d7d59561a64e:46491
2024-11-20T20:38:58,566 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d7d59561a64e,46491,1732135138378
2024-11-20T20:38:58,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:38:58,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:38:58,579 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d7d59561a64e,46491,1732135138378
2024-11-20T20:38:58,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:58,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-20T20:38:58,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:58,592 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T20:38:58,592 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d7d59561a64e,46491,1732135138378 from backup master directory
2024-11-20T20:38:58,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:38:58,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d7d59561a64e,46491,1732135138378
2024-11-20T20:38:58,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:38:58,602 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
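The handlerCount and maxQueueLength values logged by each RpcExecutor are configuration-driven, not hard-coded; the queue length here (30) is the usual 10x multiple of the handler count (3). A minimal sketch of steering those values, assuming the standard HBase keys hbase.regionserver.handler.count and hbase.ipc.server.callqueue.read.ratio (the class name is illustrative, and whether a given mini-cluster picks these up depends on how its Configuration is built):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcHandlerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 3 handlers, matching handlerCount=3 in the log; queue length then defaults to 10x handlers (30).
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Fraction of priority handlers dedicated to reads (readHandlers=2 of 3 above).
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
        System.out.println(conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }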
2024-11-20T20:38:58,602 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d7d59561a64e,46491,1732135138378
2024-11-20T20:38:58,607 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/hbase.id] with ID: f86b0d10-9f5a-4bae-a678-ed2333d07c6c
2024-11-20T20:38:58,607 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/.tmp/hbase.id
2024-11-20T20:38:58,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741826_1002 (size=42)
2024-11-20T20:38:58,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741826_1002 (size=42)
2024-11-20T20:38:58,616 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/.tmp/hbase.id]:[hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/hbase.id]
2024-11-20T20:38:58,629 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:38:58,629 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-20T20:38:58,630 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
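The cluster ID sequence above (write to .tmp/hbase.id, then move into place) is the usual HDFS temp-file-plus-rename pattern: a rename on a single NameNode is atomic, so readers never observe a half-written ID file. A sketch of the same pattern with the plain FileSystem API; the paths and class name are illustrative, and the NameNode address is the one from the log:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:38001"); // NameNode from the log
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/demo/.tmp/hbase.id"); // illustrative paths
        Path target = new Path("/user/jenkins/test-data/demo/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("f86b0d10-9f5a-4bae-a678-ed2333d07c6c".getBytes(StandardCharsets.UTF_8));
        }
        // Atomic swap into the final location; readers see either no file or the complete file.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename failed");
        }
      }
    }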
2024-11-20T20:38:58,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:58,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:38:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:38:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:38:58,653 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T20:38:58,653 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-20T20:38:58,654 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:38:58,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:38:58,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:38:58,661 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store
2024-11-20T20:38:58,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:38:58,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:38:58,668 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:38:58,668 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T20:38:58,668 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:58,668 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:58,668 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T20:38:58,668 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:38:58,668 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
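The 'master:store' descriptor printed above maps directly onto the HBase client builder API: for example, the 'info' family's VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => '8192', BLOOMFILTER => 'ROWCOL', and DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' can be expressed as below. This is a sketch, not the MasterRegion bootstrap code; the class name is illustrative and only the 'info' family is shown.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8192)                                  // BLOCKSIZE => '8192 B (8KB)'
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .build())
            .build();
        System.out.println(td);
      }
    }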
2024-11-20T20:38:58,668 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135138668Disabling compacts and flushes for region at 1732135138668Disabling writes for close at 1732135138668Writing region close event to WAL at 1732135138668Closed at 1732135138668
2024-11-20T20:38:58,669 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/.initializing
2024-11-20T20:38:58,669 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/WALs/d7d59561a64e,46491,1732135138378
2024-11-20T20:38:58,671 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C46491%2C1732135138378, suffix=, logDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/WALs/d7d59561a64e,46491,1732135138378, archiveDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/oldWALs, maxLogs=10
2024-11-20T20:38:58,672 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C46491%2C1732135138378.1732135138672
2024-11-20T20:38:58,678 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/WALs/d7d59561a64e,46491,1732135138378/d7d59561a64e%2C46491%2C1732135138378.1732135138672
2024-11-20T20:38:58,679 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44985:44985),(127.0.0.1/127.0.0.1:40283:40283)]
2024-11-20T20:38:58,680 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:38:58,680 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:38:58,680 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,680 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-20T20:38:58,683 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:38:58,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:38:58,684 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-20T20:38:58,685 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:38:58,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:38:58,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-20T20:38:58,687 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:38:58,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:38:58,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,688 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-20T20:38:58,688 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:38:58,689 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:38:58,689 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,690 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,690 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,691 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,691 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,692 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-20T20:38:58,694 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-20T20:38:58,696 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T20:38:58,696 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716212, jitterRate=-0.08929051458835602}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-20T20:38:58,697 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732135138680Initializing all the Stores at 1732135138681 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135138681Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135138681Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135138681Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135138681Cleaning up temporary data from old regions at 1732135138691 (+10 ms)Region opened successfully at 1732135138697 (+6 ms)
2024-11-20T20:38:58,698 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-20T20:38:58,701 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fb7922f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0
2024-11-20T20:38:58,702 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-20T20:38:58,702 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-20T20:38:58,702 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-20T20:38:58,703 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-20T20:38:58,703 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-20T20:38:58,703 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-20T20:38:58,703 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-20T20:38:58,705 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-20T20:38:58,706 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-20T20:38:58,717 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-20T20:38:58,718 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-20T20:38:58,718 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-20T20:38:58,728 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-20T20:38:58,728 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-20T20:38:58,729 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-20T20:38:58,738 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-20T20:38:58,739 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
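The ZKUtil(444)/RecoverableZooKeeper(212) pairs above show the master probing optional znodes (/hbase/balancer, /hbase/normalizer, the split/merge switches) and treating absence as "use defaults". The same probe pattern with the raw ZooKeeper client, against the test ensemble from the log (the class name is illustrative, and this is a sketch of the pattern, not HBase's ZKUtil):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent e) -> System.out.println("event: " + e);
        // Ensemble and session timeout taken from the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64569", 30000, watcher);
        try {
          byte[] data = zk.getData("/hbase/balancer", watcher, null);
          System.out.println("balancer state: " + data.length + " bytes");
        } catch (KeeperException.NoNodeException e) {
          // Mirrors "node does not exist (not necessarily an error)": absence just means defaults apply.
          System.out.println("/hbase/balancer not set, using default");
        } finally {
          zk.close();
        }
      }
    }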
[master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T20:38:58,751 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T20:38:58,759 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T20:38:58,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:38:58,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:38:58,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:58,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:58,771 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d7d59561a64e,46491,1732135138378, sessionid=0x10159f8afe30000, setting cluster-up flag (Was=false) 2024-11-20T20:38:58,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:58,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:58,823 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T20:38:58,824 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,46491,1732135138378 2024-11-20T20:38:58,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:58,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:58,875 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T20:38:58,877 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,46491,1732135138378 2024-11-20T20:38:58,879 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T20:38:58,881 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T20:38:58,881 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T20:38:58,882 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T20:38:58,882 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d7d59561a64e,46491,1732135138378 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T20:38:58,884 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:38:58,884 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:38:58,885 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:38:58,885 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:38:58,885 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d7d59561a64e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T20:38:58,885 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:58,885 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:38:58,885 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T20:38:58,886 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732135168886 2024-11-20T20:38:58,886 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T20:38:58,886 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T20:38:58,886 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T20:38:58,887 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T20:38:58,887 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T20:38:58,887 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T20:38:58,887 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:58,887 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T20:38:58,887 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:38:58,887 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T20:38:58,887 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T20:38:58,887 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T20:38:58,888 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T20:38:58,888 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T20:38:58,888 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135138888,5,FailOnTimeoutGroup] 2024-11-20T20:38:58,888 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135138888,5,FailOnTimeoutGroup] 2024-11-20T20:38:58,888 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:58,888 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T20:38:58,888 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:58,888 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:58,888 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:58,889 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T20:38:58,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:38:58,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:38:58,898 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T20:38:58,898 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f 2024-11-20T20:38:58,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741832_1008 (size=32) 2024-11-20T20:38:58,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741832_1008 (size=32) 2024-11-20T20:38:58,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:38:58,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T20:38:58,906 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T20:38:58,906 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:58,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:58,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T20:38:58,907 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T20:38:58,907 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:58,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:58,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T20:38:58,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T20:38:58,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:58,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:58,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T20:38:58,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T20:38:58,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:58,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:58,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T20:38:58,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740 2024-11-20T20:38:58,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740 2024-11-20T20:38:58,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T20:38:58,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T20:38:58,914 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
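The FlushLargeStoresPolicy record above falls back to memStoreFlushSize divided by the number of families (16.0 M) because the table descriptor does not set hbase.hregion.percolumnfamilyflush.size.lower.bound. A minimal sketch of setting that bound explicitly on a descriptor, assuming the standard HBase client builder API; the table name is hypothetical, the config key is the one named in the log record:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class FlushBoundExample {
      // Flush an individual store once it holds >= 16 MB, instead of the
      // derived memStoreFlushSize / numberOfFamilies fallback the log mentions.
      static TableDescriptor withFlushLowerBound() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216")
            .build();
      }
    }

When the key is absent, as for hbase:meta here, the region derives the per-family bound at open time, which is exactly the DEBUG line above.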
2024-11-20T20:38:58,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T20:38:58,921 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:38:58,922 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810634, jitterRate=0.030775025486946106}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T20:38:58,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732135138904Initializing all the Stores at 1732135138905 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135138905Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135138905Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135138905Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135138905Cleaning up temporary data from old regions at 1732135138913 (+8 ms)Region opened successfully at 1732135138922 (+9 ms) 2024-11-20T20:38:58,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T20:38:58,923 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T20:38:58,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T20:38:58,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T20:38:58,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T20:38:58,923 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T20:38:58,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135138922Disabling compacts and flushes for region at 1732135138922Disabling writes for close at 1732135138923 (+1 ms)Writing 
region close event to WAL at 1732135138923Closed at 1732135138923 2024-11-20T20:38:58,925 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:38:58,925 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T20:38:58,925 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T20:38:58,926 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T20:38:58,928 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T20:38:58,954 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(746): ClusterId : f86b0d10-9f5a-4bae-a678-ed2333d07c6c 2024-11-20T20:38:58,955 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T20:38:58,960 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T20:38:58,960 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T20:38:58,971 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T20:38:58,971 DEBUG [RS:0;d7d59561a64e:46743 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b260bda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0 2024-11-20T20:38:58,986 DEBUG [RS:0;d7d59561a64e:46743 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d7d59561a64e:46743 2024-11-20T20:38:58,987 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T20:38:58,987 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T20:38:58,987 DEBUG [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T20:38:58,988 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(2659): reportForDuty to master=d7d59561a64e,46491,1732135138378 with port=46743, startcode=1732135138535 2024-11-20T20:38:58,988 DEBUG [RS:0;d7d59561a64e:46743 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T20:38:58,989 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42853, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T20:38:58,990 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46491 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d7d59561a64e,46743,1732135138535 2024-11-20T20:38:58,990 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46491 {}] master.ServerManager(517): Registering regionserver=d7d59561a64e,46743,1732135138535 2024-11-20T20:38:58,992 DEBUG [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f 2024-11-20T20:38:58,992 DEBUG [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38001 2024-11-20T20:38:58,992 DEBUG [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T20:38:59,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:38:59,002 DEBUG [RS:0;d7d59561a64e:46743 {}] zookeeper.ZKUtil(111): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d7d59561a64e,46743,1732135138535 2024-11-20T20:38:59,002 WARN [RS:0;d7d59561a64e:46743 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T20:38:59,002 INFO [RS:0;d7d59561a64e:46743 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:38:59,002 DEBUG [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535 2024-11-20T20:38:59,004 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d7d59561a64e,46743,1732135138535] 2024-11-20T20:38:59,006 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T20:38:59,007 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T20:38:59,008 INFO [RS:0;d7d59561a64e:46743 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T20:38:59,008 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
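The PressureAwareCompactionThroughputController record above reports a 100 MB/s upper and 50 MB/s lower compaction throughput bound with a 60 s tuning period. A sketch of tuning those bounds through the cluster Configuration; the key names are the commonly documented ones for this controller and are an assumption here (they do not appear in the log itself), so verify them against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class CompactionThroughputExample {
      // Bounds in bytes/second; values mirror the ones the log reports.
      static Configuration compactionThroughputBounds() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }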
2024-11-20T20:38:59,008 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T20:38:59,009 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T20:38:59,009 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,009 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,009 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,009 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,009 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,009 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,009 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:38:59,010 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,010 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,010 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,010 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,010 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:59,010 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:38:59,010 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T20:38:59,010 DEBUG [RS:0;d7d59561a64e:46743 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T20:38:59,011 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,011 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,011 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,011 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,011 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,011 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,46743,1732135138535-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
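The Close-WAL-Writer WARN above comes from WAL lease recovery probing DistributedFileSystem.isFileClosed through reflection (note the GeneratedMethodAccessor frames); the InvocationTargetException wraps IOException("Filesystem closed") because the DFS client for the older test cluster (localhost:44749) had already been shut down. A simplified, hypothetical sketch of that reflective probe pattern — not the actual RecoverLeaseFSUtils source, just the shape the stack trace implies:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class IsFileClosedProbe {
      // isFileClosed is not on the generic FileSystem interface, so the probe
      // looks it up reflectively and degrades gracefully when it is missing.
      static boolean isFileClosed(FileSystem fs, Path p) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // method unavailable: caller falls back to lease recovery
        } catch (InvocationTargetException e) {
          // the "Filesystem closed" IOException in the log surfaces here
          return false;
        }
      }
    }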
2024-11-20T20:38:59,027 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T20:38:59,027 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,46743,1732135138535-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,027 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,027 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.Replication(171): d7d59561a64e,46743,1732135138535 started 2024-11-20T20:38:59,041 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,041 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(1482): Serving as d7d59561a64e,46743,1732135138535, RpcServer on d7d59561a64e/172.17.0.2:46743, sessionid=0x10159f8afe30001 2024-11-20T20:38:59,041 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T20:38:59,041 DEBUG [RS:0;d7d59561a64e:46743 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d7d59561a64e,46743,1732135138535 2024-11-20T20:38:59,041 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,46743,1732135138535' 2024-11-20T20:38:59,041 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T20:38:59,042 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T20:38:59,042 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T20:38:59,042 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T20:38:59,042 DEBUG [RS:0;d7d59561a64e:46743 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d7d59561a64e,46743,1732135138535 2024-11-20T20:38:59,042 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,46743,1732135138535' 2024-11-20T20:38:59,043 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T20:38:59,043 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T20:38:59,043 DEBUG [RS:0;d7d59561a64e:46743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T20:38:59,043 INFO [RS:0;d7d59561a64e:46743 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T20:38:59,043 INFO [RS:0;d7d59561a64e:46743 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T20:38:59,078 WARN [d7d59561a64e:46491 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
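The AssignmentManager warning above ("No servers available; cannot place 1 unassigned regions") is transient: the meta ASSIGN procedure was queued before the lone region server finished reporting for duty, and the next assignQueue pass (below, systemServersCount=1) places it. A hedged sketch of how test code might wait for live servers before expecting assignment, using the public Admin API; the timeout and poll interval are arbitrary illustration values:

    import org.apache.hadoop.hbase.client.Admin;

    class WaitForServers {
      // Polls cluster metrics until the expected number of region servers is live.
      static void awaitRegionServers(Admin admin, int expected) throws Exception {
        long deadline = System.currentTimeMillis() + 30_000;
        while (admin.getClusterMetrics().getLiveServerMetrics().size() < expected) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("region servers never reported in");
          }
          Thread.sleep(100);
        }
      }
    }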
2024-11-20T20:38:59,145 INFO [RS:0;d7d59561a64e:46743 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C46743%2C1732135138535, suffix=, logDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535, archiveDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/oldWALs, maxLogs=32 2024-11-20T20:38:59,146 INFO [RS:0;d7d59561a64e:46743 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C46743%2C1732135138535.1732135139146 2024-11-20T20:38:59,151 INFO [RS:0;d7d59561a64e:46743 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135139146 2024-11-20T20:38:59,152 DEBUG [RS:0;d7d59561a64e:46743 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44985:44985),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-20T20:38:59,328 DEBUG [d7d59561a64e:46491 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T20:38:59,329 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d7d59561a64e,46743,1732135138535 2024-11-20T20:38:59,330 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,46743,1732135138535, state=OPENING 2024-11-20T20:38:59,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:59,412 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T20:38:59,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:59,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:38:59,423 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T20:38:59,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:38:59,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:38:59,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,46743,1732135138535}] 2024-11-20T20:38:59,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:38:59,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:38:59,578 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T20:38:59,580 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36605, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T20:38:59,583 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T20:38:59,583 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:38:59,585 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C46743%2C1732135138535.meta, suffix=.meta, logDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535, archiveDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/oldWALs, maxLogs=32 2024-11-20T20:38:59,586 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C46743%2C1732135138535.meta.1732135139586.meta 2024-11-20T20:38:59,591 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.meta.1732135139586.meta 2024-11-20T20:38:59,592 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44985:44985),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-20T20:38:59,604 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T20:38:59,605 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T20:38:59,605 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T20:38:59,605 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
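The records above load MultiRowMutationEndpoint from the table descriptor's coprocessor attribute (the coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|' entry seen when the meta descriptor was created earlier). A minimal sketch of attaching the same endpoint to a user table via the builder API; the table name is hypothetical, the class name is the one the log loads:

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class CoprocessorExample {
      static TableDescriptor withMultiRowMutation() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            // the RegionCoprocessorHost line above reports this class as
            // "Loaded ... from HTD" once the region opens
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }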
2024-11-20T20:38:59,605 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T20:38:59,605 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:38:59,605 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T20:38:59,605 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T20:38:59,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T20:38:59,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T20:38:59,608 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:59,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:59,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T20:38:59,609 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T20:38:59,609 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:59,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:59,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T20:38:59,610 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T20:38:59,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:59,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:38:59,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T20:38:59,611 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T20:38:59,611 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:59,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
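The HStore/CacheConfig records above reflect the per-family attributes declared in the meta descriptor: IN_MEMORY, ROWCOL bloom filters, ROW_INDEX_V1 data block encoding, and 8 KB blocks. A sketch of declaring an equivalent family with the standard builder API (the family name mirrors the 'info' family in the log; everything else is copied from the descriptor attributes shown above):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    class FamilyExample {
      static ColumnFamilyDescriptor metaLikeFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setInMemory(true)                                  // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)               // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)                                 // BLOCKSIZE => 8 KB
            .setMaxVersions(3)                                  // VERSIONS => '3'
            .build();
      }
    }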
2024-11-20T20:38:59,612 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T20:38:59,612 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740 2024-11-20T20:38:59,613 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740 2024-11-20T20:38:59,615 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T20:38:59,615 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T20:38:59,615 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T20:38:59,616 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T20:38:59,617 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782802, jitterRate=-0.004616037011146545}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T20:38:59,617 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T20:38:59,618 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732135139605Writing region info on filesystem at 1732135139605Initializing all the Stores at 1732135139606 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135139606Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135139606Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135139606Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135139606Cleaning up temporary data from old regions at 1732135139615 (+9 ms)Running coprocessor post-open hooks at 1732135139617 (+2 ms)Region opened successfully at 1732135139618 (+1 ms) 2024-11-20T20:38:59,619 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732135139578 2024-11-20T20:38:59,621 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T20:38:59,622 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T20:38:59,623 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,46743,1732135138535 2024-11-20T20:38:59,624 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,46743,1732135138535, state=OPEN 2024-11-20T20:38:59,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T20:38:59,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T20:38:59,663 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d7d59561a64e,46743,1732135138535 2024-11-20T20:38:59,663 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:38:59,663 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T20:38:59,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T20:38:59,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,46743,1732135138535 in 239 msec 2024-11-20T20:38:59,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T20:38:59,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 741 msec 2024-11-20T20:38:59,670 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:38:59,670 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T20:38:59,671 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T20:38:59,672 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,46743,1732135138535, seqNum=-1] 2024-11-20T20:38:59,672 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T20:38:59,673 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60035, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T20:38:59,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 798 msec 2024-11-20T20:38:59,680 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732135139679, completionTime=-1 2024-11-20T20:38:59,680 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T20:38:59,680 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T20:38:59,682 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T20:38:59,682 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732135199682 2024-11-20T20:38:59,682 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732135259682 2024-11-20T20:38:59,682 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T20:38:59,683 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,46491,1732135138378-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,683 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,46491,1732135138378-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,683 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,46491,1732135138378-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,683 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d7d59561a64e:46491, period=300000, unit=MILLISECONDS is enabled. 
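The ChoreService records above register the master's periodic maintenance chores (balancer, normalizer, catalog janitor, and so on). A minimal sketch of the chore pattern those records refer to, with a hypothetical chore name and an arbitrary 60 s period; the constructor signature is from the public ScheduledChore API, so verify against your HBase version:

    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    class ChoreExample {
      static ScheduledChore heartbeatChore(Stoppable stopper) {
        return new ScheduledChore("ExampleHeartbeat", stopper, 60_000) {
          @Override
          protected void chore() {
            // periodic work goes here; runs every 60 s until the stopper stops
          }
        };
      }
    }

Handing the chore to a ChoreService via scheduleChore(...) is what produces the "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines throughout this log.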
2024-11-20T20:38:59,683 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,683 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T20:38:59,685 DEBUG [master/d7d59561a64e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T20:38:59,687 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.085sec 2024-11-20T20:38:59,687 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T20:38:59,687 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T20:38:59,687 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T20:38:59,688 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T20:38:59,688 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T20:38:59,688 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,46491,1732135138378-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T20:38:59,688 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,46491,1732135138378-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T20:38:59,690 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T20:38:59,690 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T20:38:59,690 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,46491,1732135138378-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
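The final span below shows the test client bootstrapping its connection: it fetches the cluster id from the master's connection registry and then resolves the hbase:meta region location. A sketch of the equivalent client-side steps with the public API, assuming a Configuration that points at this cluster (hbase-site.xml on the classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    class ClientBootstrap {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // mirrors the "Got connection registry info: cluster_id" record below
          System.out.println("clusterId=" + admin.getClusterMetrics().getClusterId());
          // mirrors "The fetched meta region location is [region=hbase:meta,,1...]"
          System.out.println(meta.getRegionLocation(new byte[0]));
        }
      }
    }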
2024-11-20T20:38:59,755 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fe7f59f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:38:59,755 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d7d59561a64e,46491,-1 for getting cluster id 2024-11-20T20:38:59,755 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T20:38:59,757 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f86b0d10-9f5a-4bae-a678-ed2333d07c6c' 2024-11-20T20:38:59,758 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T20:38:59,758 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f86b0d10-9f5a-4bae-a678-ed2333d07c6c" 2024-11-20T20:38:59,758 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@634f4d2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:38:59,758 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d7d59561a64e,46491,-1] 2024-11-20T20:38:59,759 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T20:38:59,759 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:38:59,760 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38452, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T20:38:59,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@666a58a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:38:59,762 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T20:38:59,763 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,46743,1732135138535, seqNum=-1] 2024-11-20T20:38:59,763 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T20:38:59,765 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42916, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T20:38:59,767 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d7d59561a64e,46491,1732135138378 2024-11-20T20:38:59,767 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:38:59,770 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T20:38:59,771 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T20:38:59,772 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is d7d59561a64e,46491,1732135138378 2024-11-20T20:38:59,772 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@79cd7e70 2024-11-20T20:38:59,772 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T20:38:59,773 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38454, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T20:38:59,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T20:38:59,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-20T20:38:59,774 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T20:38:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T20:38:59,778 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T20:38:59,778 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:38:59,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-20T20:38:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T20:38:59,779 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T20:38:59,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741835_1011 (size=405) 2024-11-20T20:38:59,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741835_1011 (size=405) 2024-11-20T20:38:59,789 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 73c33ba4e606ac679d016d18026d2455, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f 2024-11-20T20:38:59,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741836_1012 (size=88) 2024-11-20T20:38:59,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741836_1012 (size=88) 2024-11-20T20:38:59,796 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:38:59,796 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 73c33ba4e606ac679d016d18026d2455, disabling compactions & flushes 2024-11-20T20:38:59,797 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. 2024-11-20T20:38:59,797 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. 2024-11-20T20:38:59,797 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. after waiting 0 ms 2024-11-20T20:38:59,797 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. 
2024-11-20T20:38:59,797 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. 2024-11-20T20:38:59,797 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 73c33ba4e606ac679d016d18026d2455: Waiting for close lock at 1732135139796Disabling compacts and flushes for region at 1732135139796Disabling writes for close at 1732135139797 (+1 ms)Writing region close event to WAL at 1732135139797Closed at 1732135139797 2024-11-20T20:38:59,798 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T20:38:59,798 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732135139798"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732135139798"}]},"ts":"1732135139798"} 2024-11-20T20:38:59,801 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-20T20:38:59,802 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T20:38:59,802 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732135139802"}]},"ts":"1732135139802"} 2024-11-20T20:38:59,805 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-20T20:38:59,805 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=73c33ba4e606ac679d016d18026d2455, ASSIGN}] 2024-11-20T20:38:59,806 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=73c33ba4e606ac679d016d18026d2455, ASSIGN 2024-11-20T20:38:59,807 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=73c33ba4e606ac679d016d18026d2455, ASSIGN; state=OFFLINE, location=d7d59561a64e,46743,1732135138535; forceNewPlan=false, retain=false 2024-11-20T20:38:59,958 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=73c33ba4e606ac679d016d18026d2455, regionState=OPENING, regionLocation=d7d59561a64e,46743,1732135138535 2024-11-20T20:38:59,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=73c33ba4e606ac679d016d18026d2455, ASSIGN because future has completed 2024-11-20T20:38:59,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 73c33ba4e606ac679d016d18026d2455, server=d7d59561a64e,46743,1732135138535}] 2024-11-20T20:39:00,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:00,118 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. 
2024-11-20T20:39:00,119 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 73c33ba4e606ac679d016d18026d2455, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.', STARTKEY => '', ENDKEY => ''} 2024-11-20T20:39:00,119 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,119 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:39:00,119 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,119 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,121 INFO [StoreOpener-73c33ba4e606ac679d016d18026d2455-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,123 INFO [StoreOpener-73c33ba4e606ac679d016d18026d2455-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73c33ba4e606ac679d016d18026d2455 columnFamilyName info 2024-11-20T20:39:00,123 DEBUG [StoreOpener-73c33ba4e606ac679d016d18026d2455-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:00,124 INFO [StoreOpener-73c33ba4e606ac679d016d18026d2455-1 {}] regionserver.HStore(327): Store=73c33ba4e606ac679d016d18026d2455/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:39:00,124 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,125 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,126 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,126 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,126 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,129 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,132 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:39:00,132 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 73c33ba4e606ac679d016d18026d2455; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757277, jitterRate=-0.03707332909107208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T20:39:00,132 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 73c33ba4e606ac679d016d18026d2455 2024-11-20T20:39:00,133 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 73c33ba4e606ac679d016d18026d2455: Running coprocessor pre-open hook at 1732135140120Writing region info on filesystem at 1732135140120Initializing all the Stores at 1732135140121 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135140121Cleaning up temporary data from old regions at 1732135140126 (+5 ms)Running coprocessor post-open hooks at 1732135140132 (+6 ms)Region opened successfully at 1732135140133 (+1 ms) 2024-11-20T20:39:00,134 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455., pid=6, masterSystemTime=1732135140114 2024-11-20T20:39:00,136 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. 2024-11-20T20:39:00,137 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. 2024-11-20T20:39:00,137 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=73c33ba4e606ac679d016d18026d2455, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,46743,1732135138535 2024-11-20T20:39:00,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 73c33ba4e606ac679d016d18026d2455, server=d7d59561a64e,46743,1732135138535 because future has completed 2024-11-20T20:39:00,144 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T20:39:00,145 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 73c33ba4e606ac679d016d18026d2455, server=d7d59561a64e,46743,1732135138535 in 179 msec 2024-11-20T20:39:00,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T20:39:00,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=73c33ba4e606ac679d016d18026d2455, ASSIGN in 340 msec 2024-11-20T20:39:00,148 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T20:39:00,149 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732135140148"}]},"ts":"1732135140148"} 2024-11-20T20:39:00,151 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-20T20:39:00,152 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T20:39:00,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 378 msec
[The RecoverLeaseFSUtils(258) "Failed invocation" warning recurs for the WAL files and timestamps listed below, each time with a stack trace identical to the 20:39:00,010 occurrence above (java.lang.reflect.InvocationTargetException caused by java.io.IOException: Filesystem closed); the duplicate traces are omitted.]
2024-11-20T20:39:00,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:39:00,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:39:00,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:39:01,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:39:01,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 after 68050ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:39:01,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:39:01,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:39:01,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:39:01,990 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T20:39:01,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null [this warning repeats verbatim with timestamps from 20:39:01,992 through 20:39:02,021; duplicates omitted]
2024-11-20T20:39:02,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:39:02,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:39:02,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:39:02,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:39:03,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:39:03,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:39:03,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
11 more 2024-11-20T20:39:03,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:04,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:04,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:04,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:04,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
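Each of these "Failed invocation" warnings carries the identical two-part stack trace shown in full above: RecoverLeaseFSUtils.isFileClosed (RecoverLeaseFSUtils.java:254) calls DFSClient.isFileClosed through java.lang.reflect.Method.invoke, the already-shut-down DFSClient fails its checkOpen guard with IOException("Filesystem closed"), and reflection re-surfaces that as an InvocationTargetException whose own message is null; that is why the log prints "InvocationTargetException: null" with the real failure under "Caused by". A minimal, self-contained sketch of that wrapping behaviour, using a hypothetical FakeDfsClient rather than the real HDFS client:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveProbe {
    // Hypothetical stand-in for DFSClient; only the closed-client behaviour is modelled.
    public static class FakeDfsClient {
        private final boolean open;

        public FakeDfsClient(boolean open) {
            this.open = open;
        }

        // Stand-in for DFSClient.isFileClosed(String): guarded by a checkOpen-style test.
        public boolean isFileClosed(String path) throws IOException {
            if (!open) {
                throw new IOException("Filesystem closed");
            }
            return true;
        }
    }

    public static void main(String[] args) throws Exception {
        FakeDfsClient client = new FakeDfsClient(false); // pretend the client was already closed
        Method m = FakeDfsClient.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(client, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The wrapper's own message is null, hence "InvocationTargetException: null" in the log.
            System.out.println("wrapper message: " + e.getMessage());
            // The IOException("Filesystem closed") is preserved as the cause ("Caused by" in the log).
            System.out.println("cause: " + e.getCause());
        }
    }
}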
2024-11-20T20:39:05,006 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-20T20:39:05,007 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-11-20T20:39:05,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:39:05,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:39:05,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:39:05,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:39:06,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:39:06,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:39:06,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-20T20:39:06,486 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-20T20:39:06,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:39:06,486 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-20T20:39:06,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-20T20:39:06,487 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-20T20:39:06,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:06,487 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-20T20:39:06,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:39:06,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
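The timestamps show each WAL file being probed again roughly once per second (the ...1732135043207 WAL fails at 20:39:02,344, then 20:39:03,345, 20:39:04,345, 20:39:05,346, and so on), consistent with recoverDFSFileLease polling isFileClosed in a loop and logging every failed probe at RecoverLeaseFSUtils(258). A rough sketch of that polling pattern under those assumptions; FileClosedProbe, waitUntilClosed, pollIntervalMs and maxWaitMs are illustrative names, not the HBase API:

import java.io.IOException;

public class LeaseRecoveryPoller {
    // Hypothetical probe abstraction standing in for the reflective DFSClient.isFileClosed call.
    interface FileClosedProbe {
        boolean isFileClosed(String path) throws IOException;
    }

    // Poll until the file reports closed or the deadline passes; each failed probe is
    // only warned about and retried, matching the once-per-second WARN cadence above.
    static boolean waitUntilClosed(FileClosedProbe probe, String path,
                                   long pollIntervalMs, long maxWaitMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + maxWaitMs;
        while (System.currentTimeMillis() < deadline) {
            try {
                if (probe.isFileClosed(path)) {
                    return true; // lease recovered, the writer can be considered closed
                }
            } catch (IOException e) {
                System.err.println("Failed invocation for " + path + ": " + e);
            }
            Thread.sleep(pollIntervalMs);
        }
        return false; // gave up; the caller decides how to proceed
    }

    public static void main(String[] args) throws InterruptedException {
        // A probe whose client is permanently closed never succeeds, so this prints
        // one warning per interval and then returns false, like the log above.
        FileClosedProbe closedClient = path -> {
            throw new IOException("Filesystem closed");
        };
        System.out.println("file closed: " + waitUntilClosed(closedClient, "/some/wal/file", 1000L, 3000L));
    }
}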
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:07,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:07,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:07,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:07,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:08,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:08,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:08,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:08,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:09,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:09,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
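Every WARN entry above has the same two-layer shape: a java.lang.reflect.InvocationTargetException with a null message wrapping the real failure, java.io.IOException: Filesystem closed. That is the signature of a reflective call. As the stack shows, RecoverLeaseFSUtils.isFileClosed reaches DistributedFileSystem.isFileClosed through Method.invoke, so the IOException thrown by the already-closed DFSClient surfaces only as the cause of the wrapper. The following is a minimal, self-contained Java sketch of that wrapping; ClosedClient is an invented stand-in for the DFS client, not Hadoop or HBase code:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrapDemo {
    // Invented stand-in: any reflective target that throws IOException
    // reproduces the wrapping seen in the log above.
    public static class ClosedClient {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed"); // what DFSClient.checkOpen reports
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = ClosedClient.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new ClosedClient(), "/some/wal");
        } catch (InvocationTargetException e) {
            // The wrapper itself carries no message, which log4j renders as "null".
            System.out.println("wrapper: " + e);
            // The real error is only reachable through getCause().
            System.out.println("cause: " + e.getCause());
        }
    }
}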
2024-11-20T20:39:09,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T20:39:09,846 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T20:39:09,846 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-11-20T20:39:09,850 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:09,850 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:09,853 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455., hostname=d7d59561a64e,46743,1732135138535, seqNum=2]
2024-11-20T20:39:09,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:09,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:09,867 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T20:39:09,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-20T20:39:09,868 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T20:39:09,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
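The flush at 20:39:09,860 is client-driven: the master logs the request from Client=jenkins, stores a FlushTableProcedure (pid=7), and fans it out into one FlushRegionProcedure per region (pid=8). With the public HBase client API, an equivalent request is a one-liner; this is a minimal sketch, assuming an hbase-site.xml on the classpath that points at the cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits the master-side flush, equivalent to the request logged above.
            admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
    }
}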
2024-11-20T20:39:10,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-20T20:39:10,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:10,033 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 73c33ba4e606ac679d016d18026d2455 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T20:39:10,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/793ff87b76cd433bb515c58219774132 is 1080, key is row0001/info:/1732135149855/Put/seqid=0
2024-11-20T20:39:10,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741837_1013 (size=6033)
2024-11-20T20:39:10,068 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/793ff87b76cd433bb515c58219774132
2024-11-20T20:39:10,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741837_1013 (size=6033)
2024-11-20T20:39:10,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/793ff87b76cd433bb515c58219774132 as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/793ff87b76cd433bb515c58219774132
2024-11-20T20:39:10,080 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/793ff87b76cd433bb515c58219774132, entries=1, sequenceid=5, filesize=5.9 K
2024-11-20T20:39:10,081 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 73c33ba4e606ac679d016d18026d2455 in 48ms, sequenceid=5, compaction requested=false
2024-11-20T20:39:10,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 73c33ba4e606ac679d016d18026d2455:
2024-11-20T20:39:10,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:10,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-20T20:39:10,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-20T20:39:10,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-20T20:39:10,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 216 msec
2024-11-20T20:39:10,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 228 msec
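The entries from 20:39:10,054 to 20:39:10,080 trace the two-step commit of the flushed store file: the HFile is first written under the region's .tmp directory, and only after a successful write is it moved into the info family directory, where it becomes visible to readers. The pattern rests on HDFS rename being atomic within a filesystem. Below is a sketch of that commit step using the plain Hadoop FileSystem API; the helper and its paths are illustrative, not the actual HRegionFileSystem code:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    // Readers never observe a half-written store file: it only appears
    // in the family directory once the rename has succeeded.
    static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path committed = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, committed)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + committed);
        }
        return committed;
    }
}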
2024-11-20T20:39:10,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 after 68052ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T20:39:10,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta after 68058ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
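The attempt=2 entries expose the retry loop inside lease recovery: ask the NameNode to recover the previous writer's lease, then poll isFileClosed until the file is reported closed, sleeping between probes; the 68052ms/68058ms figures are the time spent looping so far. Here the loop can never finish, because the shared FileSystem instance was closed underneath it and every call throws. Below is a simplified sketch of the loop against DistributedFileSystem; the real RecoverLeaseFSUtils goes through reflection (hence the wrapped exceptions above) and uses configurable timeouts:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    static void recoverWal(DistributedFileSystem dfs, Path wal)
            throws IOException, InterruptedException {
        while (true) {
            // Asks the NameNode to take over the dead writer's lease;
            // true means the file is already closed and safe to read.
            if (dfs.recoverLease(wal)) {
                return;
            }
            // Poll for the close to land before asking again.
            for (int i = 0; i < 60; i++) {
                if (dfs.isFileClosed(wal)) {
                    return;
                }
                Thread.sleep(1_000L); // roughly the one-second cadence in this log
            }
            // If the DFSClient has been closed, as in this test run, both calls
            // above throw IOException("Filesystem closed") and recovery cannot succeed.
        }
    }
}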
2024-11-20T20:39:11,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:11,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:11,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:11,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:12,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:12,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:12,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:12,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:13,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:13,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:13,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:13,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
[the identical util.RecoverLeaseFSUtils(258) "Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warning repeats roughly once per second from 2024-11-20T20:39:14,020 through 2024-11-20T20:39:19,521, cycling over the same four files: the master WAL d7d59561a64e%2C38137%2C1732135042252.1732135042557, the region server WALs d7d59561a64e%2C39199%2C1732135042427.1732135043207 and d7d59561a64e%2C35431%2C1732135043885.1732135044133, and the meta WAL d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta; the duplicated stack traces are elided here]
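[Context for the warnings above: they arise because the WAL close path keeps retrying lease recovery after the test's MiniDFSCluster client has already been shut down. RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed(Path) via reflection, and with the DFSClient closed every probe throws an InvocationTargetException wrapping "java.io.IOException: Filesystem closed". The following is a minimal Java sketch of that reflective probe pattern, not HBase's actual source; it assumes a FileSystem whose concrete class exposes isFileClosed.]

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
    // Look up isFileClosed reflectively so the caller compiles against plain
    // FileSystem even when only DistributedFileSystem implements the method.
    static Method findIsFileClosed(FileSystem fs) {
        try {
            return fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
            return null; // this filesystem does not support the probe
        }
    }

    // One probe iteration of a recover-lease retry loop: returns true once
    // the NameNode reports the file closed, false if the probe failed.
    static boolean probe(FileSystem fs, Method isFileClosed, Path p) {
        try {
            return (Boolean) isFileClosed.invoke(fs, p);
        } catch (IllegalAccessException | InvocationTargetException e) {
            // With the DFSClient already closed we land here every time:
            // e.getCause() is java.io.IOException("Filesystem closed"),
            // surfacing in the log as "Failed invocation for <path>".
            System.err.println("Failed invocation for " + p + " " + e);
            return false;
        }
    }
}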
2024-11-20T20:39:19,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-20T20:39:19,916 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T20:39:19,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:19,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-20T20:39:19,923 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T20:39:19,925 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T20:39:19,925 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T20:39:20,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 [stack trace identical to the one shown above; elided]
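[Context for pid=9/pid=10 above: they trace a client-initiated table flush through the master's procedure framework, where a FlushTableProcedure fans out one FlushRegionProcedure per region. A hedged client-side sketch of how such a flush is issued follows; it assumes a Configuration pointing at the test cluster, and the table name is taken from the log.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Assumed to resolve to the running test cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Synchronous flush: the master stores a FlushTableProcedure
            // (pid=9 above), dispatches a FlushRegionProcedure (pid=10) to
            // the region server, and returns once the procedure finishes.
            admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
    }
}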
2024-11-20T20:39:20,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-20T20:39:20,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:20,080 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 73c33ba4e606ac679d016d18026d2455 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T20:39:20,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/215d0b19d84548b784071aa2825976a2 is 1080, key is row0002/info:/1732135159918/Put/seqid=0
2024-11-20T20:39:20,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741838_1014 (size=6033)
2024-11-20T20:39:20,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741838_1014 (size=6033)
2024-11-20T20:39:20,102 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/215d0b19d84548b784071aa2825976a2
2024-11-20T20:39:20,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/215d0b19d84548b784071aa2825976a2 as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/215d0b19d84548b784071aa2825976a2
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/215d0b19d84548b784071aa2825976a2, entries=1, sequenceid=9, filesize=5.9 K 2024-11-20T20:39:20,116 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 73c33ba4e606ac679d016d18026d2455 in 36ms, sequenceid=9, compaction requested=false 2024-11-20T20:39:20,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 73c33ba4e606ac679d016d18026d2455: 2024-11-20T20:39:20,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. 2024-11-20T20:39:20,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-20T20:39:20,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-20T20:39:20,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T20:39:20,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 193 msec 2024-11-20T20:39:20,122 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec 2024-11-20T20:39:20,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
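The pid=9/pid=10 sequence above (FlushTableProcedure fanning out one FlushRegionProcedure per region, then reporting the operation complete to the client) is what a client-side flush request produces. A minimal sketch of that client call, assuming a reachable cluster configuration on the classpath; the class name is illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a FlushTableProcedure on the master (pid=9 above);
      // the master spawns a FlushRegionProcedure per region (pid=10 above) and
      // the client polls "is procedure done" until completion, as in the
      // MasterRpcServices records in this log.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}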
2024-11-20T20:39:20,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
    [same InvocationTargetException / "Filesystem closed" stack trace as at 20:39:20,024 above]
2024-11-20T20:39:20,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
    [same stack trace]
2024-11-20T20:39:20,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
    [same stack trace]
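The InvocationTargetException in these warnings is a reflection artifact, not the real error: the GeneratedMethodAccessor frames show RecoverLeaseFSUtils invoking DistributedFileSystem.isFileClosed through java.lang.reflect.Method, and Method.invoke wraps whatever the target method throws, so the operative failure is the nested "Caused by: java.io.IOException: Filesystem closed". A minimal sketch of that pattern, with the class name and error handling as illustrative assumptions:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class ReflectiveIsFileClosed {
  // Invoke DistributedFileSystem.isFileClosed reflectively, in the style of
  // the RecoverLeaseFSUtils frames above, and unwrap the real cause.
  public static boolean isFileClosed(DistributedFileSystem dfs, Path path) throws IOException {
    try {
      Method m = DistributedFileSystem.class.getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(dfs, path);
    } catch (InvocationTargetException e) {
      // Method.invoke wraps the target's exception; the log's
      // "Caused by: java.io.IOException: Filesystem closed" is this cause.
      if (e.getCause() instanceof IOException) {
        throw (IOException) e.getCause();
      }
      throw new IOException(e.getCause());
    } catch (ReflectiveOperationException e) {
      throw new IOException(e);
    }
  }
}

Unwrapping getCause() is what turns the opaque "InvocationTargetException: null" header into the actionable IOException underneath.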
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:21,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:21,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:21,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:22,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:22,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:22,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:22,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:23,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:23,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:23,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:23,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:24,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:24,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:24,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:24,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:25,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:25,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:25,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:25,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:26,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:26,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:26,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:28,357 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details.
2024-11-20T20:39:29,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-20T20:39:29,957 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T20:39:29,961 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C46743%2C1732135138535.1732135169960
2024-11-20T20:39:29,967 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:29,967 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:29,968 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:29,968 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:29,968 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:29,968 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135139146 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135169960
2024-11-20T20:39:29,969 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44985:44985),(127.0.0.1/127.0.0.1:40283:40283)]
2024-11-20T20:39:29,969 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135139146 is not closed yet, will try archiving it next time
2024-11-20T20:39:29,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741833_1009 (size=5546)
2024-11-20T20:39:29,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741833_1009 (size=5546)
2024-11-20T20:39:29,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:29,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:29,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-20T20:39:29,973 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T20:39:29,974 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T20:39:29,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T20:39:30,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-20T20:39:30,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:30,129 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 73c33ba4e606ac679d016d18026d2455 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T20:39:30,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/4481652710dc4e009fd249e7ee22377d is 1080, key is row0003/info:/1732135169958/Put/seqid=0
2024-11-20T20:39:30,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741840_1016 (size=6033)
2024-11-20T20:39:30,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741840_1016 (size=6033)
2024-11-20T20:39:30,140 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/4481652710dc4e009fd249e7ee22377d
2024-11-20T20:39:30,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/4481652710dc4e009fd249e7ee22377d as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/4481652710dc4e009fd249e7ee22377d
2024-11-20T20:39:30,155 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/4481652710dc4e009fd249e7ee22377d, entries=1, sequenceid=13, filesize=5.9 K
2024-11-20T20:39:30,156 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 73c33ba4e606ac679d016d18026d2455 in 28ms, sequenceid=13, compaction requested=true
2024-11-20T20:39:30,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 73c33ba4e606ac679d016d18026d2455:
2024-11-20T20:39:30,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:30,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-20T20:39:30,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-20T20:39:30,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-20T20:39:30,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec
2024-11-20T20:39:30,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec
2024-11-20T20:39:32,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-20T20:39:32,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:33,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:33,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:33,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:33,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:34,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:34,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:34,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:34,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:35,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:35,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:35,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:35,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:36,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:36,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:36,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:36,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:37,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:39:37,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:37,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:37,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:38,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:38,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:38,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:38,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:39,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:39,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:39,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:39,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-20T20:39:40,026 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T20:39:40,026 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:39:40,029 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:39:40,029 DEBUG [Time-limited test {}] regionserver.HStore(1541): 73c33ba4e606ac679d016d18026d2455/info is initiating minor compaction (all files)
2024-11-20T20:39:40,029 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-20T20:39:40,029 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:40,030 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 73c33ba4e606ac679d016d18026d2455/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:40,030 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/793ff87b76cd433bb515c58219774132, hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/215d0b19d84548b784071aa2825976a2, hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/4481652710dc4e009fd249e7ee22377d] into tmpdir=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp, totalSize=17.7 K
2024-11-20T20:39:40,031 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 793ff87b76cd433bb515c58219774132, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732135149855
2024-11-20T20:39:40,032 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 215d0b19d84548b784071aa2825976a2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732135159918
2024-11-20T20:39:40,033 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4481652710dc4e009fd249e7ee22377d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732135169958
2024-11-20T20:39:40,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:40,044 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 73c33ba4e606ac679d016d18026d2455#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:39:40,044 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/be7ba25fb497453386d2cfc8204bf1ed is 1080, key is row0001/info:/1732135149855/Put/seqid=0
2024-11-20T20:39:40,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741841_1017 (size=8296)
2024-11-20T20:39:40,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741841_1017 (size=8296)
2024-11-20T20:39:40,055 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/be7ba25fb497453386d2cfc8204bf1ed as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/be7ba25fb497453386d2cfc8204bf1ed
2024-11-20T20:39:40,061 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 73c33ba4e606ac679d016d18026d2455/info of 73c33ba4e606ac679d016d18026d2455 into be7ba25fb497453386d2cfc8204bf1ed(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:39:40,061 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 73c33ba4e606ac679d016d18026d2455:
2024-11-20T20:39:40,064 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C46743%2C1732135138535.1732135180063
2024-11-20T20:39:40,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:40,069 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:40,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:40,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:40,069 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:40,069 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135169960 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135180063
2024-11-20T20:39:40,070 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40283:40283),(127.0.0.1/127.0.0.1:44985:44985)]
2024-11-20T20:39:40,070 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135169960 is not closed yet, will try archiving it next time
2024-11-20T20:39:40,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741839_1015 (size=2520)
2024-11-20T20:39:40,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741839_1015 (size=2520)
2024-11-20T20:39:40,071 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135139146 to hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/oldWALs/d7d59561a64e%2C46743%2C1732135138535.1732135139146
2024-11-20T20:39:40,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-20T20:39:40,074 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T20:39:40,074 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T20:39:40,074 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T20:39:40,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-20T20:39:40,228 INFO [master/d7d59561a64e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-20T20:39:40,228 INFO [master/d7d59561a64e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-20T20:39:40,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:40,229 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 73c33ba4e606ac679d016d18026d2455 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T20:39:40,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/e796e8d467b3450d81a6af51442577d6 is 1080, key is row0000/info:/1732135180062/Put/seqid=0
2024-11-20T20:39:40,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741843_1019 (size=6033)
2024-11-20T20:39:40,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741843_1019 (size=6033)
2024-11-20T20:39:40,245 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/e796e8d467b3450d81a6af51442577d6
2024-11-20T20:39:40,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/e796e8d467b3450d81a6af51442577d6 as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/e796e8d467b3450d81a6af51442577d6
2024-11-20T20:39:40,258 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/e796e8d467b3450d81a6af51442577d6, entries=1, sequenceid=18, filesize=5.9 K
2024-11-20T20:39:40,259 INFO [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 73c33ba4e606ac679d016d18026d2455 in 30ms, sequenceid=18, compaction requested=false
2024-11-20T20:39:40,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 73c33ba4e606ac679d016d18026d2455:
2024-11-20T20:39:40,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:40,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-20T20:39:40,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-20T20:39:40,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-20T20:39:40,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 187 msec
2024-11-20T20:39:40,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec
2024-11-20T20:39:40,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:40,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:40,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:41,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:41,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:41,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:41,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:42,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:42,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:42,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:42,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:43,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:43,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:43,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:43,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:44,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:44,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:44,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:44,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:45,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:45,119 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 73c33ba4e606ac679d016d18026d2455, had cached 0 bytes from a total of 14329 2024-11-20T20:39:45,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:45,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:45,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:46,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:46,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:46,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:46,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:47,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:47,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:47,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:47,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:48,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:48,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:48,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:48,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:49,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:49,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:49,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:49,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:50,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:50,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46491 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-20T20:39:50,176 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T20:39:50,179 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C46743%2C1732135138535.1732135190179 2024-11-20T20:39:50,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,186 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135180063 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135190179 2024-11-20T20:39:50,187 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44985:44985),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-20T20:39:50,187 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135180063 is not closed yet, will try archiving it next time 2024-11-20T20:39:50,187 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135169960 to hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/oldWALs/d7d59561a64e%2C46743%2C1732135138535.1732135169960 2024-11-20T20:39:50,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T20:39:50,187 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T20:39:50,187 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:39:50,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:39:50,188 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-20T20:39:50,188 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:39:50,188 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-20T20:39:50,188 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1669123685, stopped=false
2024-11-20T20:39:50,188 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d7d59561a64e,46491,1732135138378
2024-11-20T20:39:50,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741842_1018 (size=2026)
2024-11-20T20:39:50,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741842_1018 (size=2026)
2024-11-20T20:39:50,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:39:50,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:39:50,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:39:50,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:39:50,231 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-20T20:39:50,231 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-20T20:39:50,231 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:39:50,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:39:50,232 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:39:50,232 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd7d59561a64e,46743,1732135138535' *****
2024-11-20T20:39:50,232 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-20T20:39:50,232 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:39:50,232 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-20T20:39:50,232 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-20T20:39:50,233 INFO [RS:0;d7d59561a64e:46743 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-20T20:39:50,233 INFO [RS:0;d7d59561a64e:46743 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-20T20:39:50,233 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(3091): Received CLOSE for 73c33ba4e606ac679d016d18026d2455
2024-11-20T20:39:50,233 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(959): stopping server d7d59561a64e,46743,1732135138535
2024-11-20T20:39:50,233 INFO [RS:0;d7d59561a64e:46743 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-20T20:39:50,233 INFO [RS:0;d7d59561a64e:46743 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d7d59561a64e:46743.
2024-11-20T20:39:50,233 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 73c33ba4e606ac679d016d18026d2455, disabling compactions & flushes
2024-11-20T20:39:50,233 DEBUG [RS:0;d7d59561a64e:46743 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:39:50,233 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:50,234 DEBUG [RS:0;d7d59561a64e:46743 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:39:50,234 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:50,234 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. after waiting 0 ms
2024-11-20T20:39:50,234 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:50,234 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-20T20:39:50,234 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-20T20:39:50,234 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-20T20:39:50,234 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-20T20:39:50,234 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 73c33ba4e606ac679d016d18026d2455 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T20:39:50,234 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-20T20:39:50,234 DEBUG [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 73c33ba4e606ac679d016d18026d2455=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.}
2024-11-20T20:39:50,234 DEBUG [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 73c33ba4e606ac679d016d18026d2455
2024-11-20T20:39:50,234 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-20T20:39:50,234 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-20T20:39:50,235 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-20T20:39:50,235 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-20T20:39:50,235 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-20T20:39:50,235 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB
2024-11-20T20:39:50,239 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/a6063b8eb45a4a29845b4c7907ea81aa is 1080, key is row0001/info:/1732135190177/Put/seqid=0
2024-11-20T20:39:50,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741845_1021 (size=6033)
2024-11-20T20:39:50,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741845_1021 (size=6033)
2024-11-20T20:39:50,246 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/a6063b8eb45a4a29845b4c7907ea81aa
2024-11-20T20:39:50,253 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/.tmp/info/a6063b8eb45a4a29845b4c7907ea81aa as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/a6063b8eb45a4a29845b4c7907ea81aa
2024-11-20T20:39:50,253 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/info/86356bd0e7514e1c8f9cbbd192f23ed6 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455./info:regioninfo/1732135140137/Put/seqid=0
2024-11-20T20:39:50,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741846_1022 (size=7308)
2024-11-20T20:39:50,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741846_1022 (size=7308)
2024-11-20T20:39:50,259 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/info/86356bd0e7514e1c8f9cbbd192f23ed6
2024-11-20T20:39:50,259 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/a6063b8eb45a4a29845b4c7907ea81aa, entries=1, sequenceid=22, filesize=5.9 K
2024-11-20T20:39:50,260 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 73c33ba4e606ac679d016d18026d2455 in 26ms, sequenceid=22, compaction requested=true
2024-11-20T20:39:50,261 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/793ff87b76cd433bb515c58219774132, hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/215d0b19d84548b784071aa2825976a2, hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/4481652710dc4e009fd249e7ee22377d] to archive
2024-11-20T20:39:50,261 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-20T20:39:50,263 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/793ff87b76cd433bb515c58219774132 to hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/793ff87b76cd433bb515c58219774132
2024-11-20T20:39:50,265 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/215d0b19d84548b784071aa2825976a2 to hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/215d0b19d84548b784071aa2825976a2
2024-11-20T20:39:50,266 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/4481652710dc4e009fd249e7ee22377d to hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/info/4481652710dc4e009fd249e7ee22377d
2024-11-20T20:39:50,266 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d7d59561a64e:46491 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-11-20T20:39:50,267 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [793ff87b76cd433bb515c58219774132=6033, 215d0b19d84548b784071aa2825976a2=6033, 4481652710dc4e009fd249e7ee22377d=6033]
2024-11-20T20:39:50,270 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/73c33ba4e606ac679d016d18026d2455/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1
2024-11-20T20:39:50,271 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:50,271 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 73c33ba4e606ac679d016d18026d2455: Waiting for close lock at 1732135190233Running coprocessor pre-close hooks at 1732135190233Disabling compacts and flushes for region at 1732135190233Disabling writes for close at 1732135190234 (+1 ms)Obtaining lock to block concurrent updates at 1732135190234Preparing flush snapshotting stores in 73c33ba4e606ac679d016d18026d2455 at 1732135190234Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732135190234Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455. at 1732135190235 (+1 ms)Flushing 73c33ba4e606ac679d016d18026d2455/info: creating writer at 1732135190235Flushing 73c33ba4e606ac679d016d18026d2455/info: appending metadata at 1732135190239 (+4 ms)Flushing 73c33ba4e606ac679d016d18026d2455/info: closing flushed file at 1732135190239Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c87c5aa: reopening flushed file at 1732135190252 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 73c33ba4e606ac679d016d18026d2455 in 26ms, sequenceid=22, compaction requested=true at 1732135190260 (+8 ms)Writing region close event to WAL at 1732135190267 (+7 ms)Running coprocessor post-close hooks at 1732135190271 (+4 ms)Closed at 1732135190271
2024-11-20T20:39:50,271 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732135139774.73c33ba4e606ac679d016d18026d2455.
2024-11-20T20:39:50,279 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/ns/f4b97c9c84144514924c85d32641d201 is 43, key is default/ns:d/1732135139674/Put/seqid=0
2024-11-20T20:39:50,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741847_1023 (size=5153)
2024-11-20T20:39:50,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741847_1023 (size=5153)
2024-11-20T20:39:50,284 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/ns/f4b97c9c84144514924c85d32641d201
2024-11-20T20:39:50,303 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/table/2bea03c18a90493fa7cb8329404fec6d is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732135140148/Put/seqid=0
2024-11-20T20:39:50,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741848_1024 (size=5508)
2024-11-20T20:39:50,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741848_1024 (size=5508)
2024-11-20T20:39:50,308 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/table/2bea03c18a90493fa7cb8329404fec6d
2024-11-20T20:39:50,314 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/info/86356bd0e7514e1c8f9cbbd192f23ed6 as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/info/86356bd0e7514e1c8f9cbbd192f23ed6
2024-11-20T20:39:50,320 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/info/86356bd0e7514e1c8f9cbbd192f23ed6, entries=10, sequenceid=11, filesize=7.1 K
2024-11-20T20:39:50,322 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/ns/f4b97c9c84144514924c85d32641d201 as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/ns/f4b97c9c84144514924c85d32641d201
2024-11-20T20:39:50,328 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/ns/f4b97c9c84144514924c85d32641d201, entries=2, sequenceid=11, filesize=5.0 K
2024-11-20T20:39:50,329 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/.tmp/table/2bea03c18a90493fa7cb8329404fec6d as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/table/2bea03c18a90493fa7cb8329404fec6d
2024-11-20T20:39:50,334 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/table/2bea03c18a90493fa7cb8329404fec6d, entries=2, sequenceid=11, filesize=5.4 K
2024-11-20T20:39:50,335 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false
2024-11-20T20:39:50,339 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-20T20:39:50,340 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:39:50,340 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-20T20:39:50,340 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135190234Running coprocessor pre-close hooks at 1732135190234Disabling compacts and flushes for region at 1732135190234Disabling writes for close at 1732135190235 (+1 ms)Obtaining lock to block concurrent updates at 1732135190235Preparing flush snapshotting stores in 1588230740 at 1732135190235Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732135190235Flushing stores of hbase:meta,,1.1588230740 at 1732135190236 (+1 ms)Flushing 1588230740/info: creating writer at 1732135190236Flushing 1588230740/info: appending metadata at 1732135190253 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732135190253Flushing 1588230740/ns: creating writer at 1732135190264 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732135190278 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732135190278Flushing 1588230740/table: creating writer at 1732135190289 (+11 ms)Flushing 1588230740/table: appending metadata at 1732135190303 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732135190303Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53b3cdf3: reopening flushed file at 1732135190313 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2be2183e: reopening flushed file at 1732135190321 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43098fb3: reopening flushed file at 1732135190328 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false at 1732135190335 (+7 ms)Writing region close event to WAL at 1732135190336 (+1 ms)Running coprocessor post-close hooks at 1732135190340 (+4 ms)Closed at 1732135190340
2024-11-20T20:39:50,340 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-20T20:39:50,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:50,434 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.HRegionServer(976): stopping server d7d59561a64e,46743,1732135138535; all regions closed.
2024-11-20T20:39:50,435 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,436 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741834_1010 (size=3306)
2024-11-20T20:39:50,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741834_1010 (size=3306)
2024-11-20T20:39:50,441 DEBUG [RS:0;d7d59561a64e:46743 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/oldWALs
2024-11-20T20:39:50,441 INFO [RS:0;d7d59561a64e:46743 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C46743%2C1732135138535.meta:.meta(num 1732135139586)
2024-11-20T20:39:50,441 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,441 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,442 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,442 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,442 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:39:50,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741844_1020 (size=1252)
2024-11-20T20:39:50,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741844_1020 (size=1252)
2024-11-20T20:39:50,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:50,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:50,589 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/WALs/d7d59561a64e,46743,1732135138535/d7d59561a64e%2C46743%2C1732135138535.1732135180063 to hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/oldWALs/d7d59561a64e%2C46743%2C1732135138535.1732135180063
2024-11-20T20:39:50,593 DEBUG [RS:0;d7d59561a64e:46743 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/oldWALs
2024-11-20T20:39:50,593 INFO [RS:0;d7d59561a64e:46743 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C46743%2C1732135138535:(num 1732135190179)
2024-11-20T20:39:50,593 DEBUG [RS:0;d7d59561a64e:46743 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:39:50,593 INFO [RS:0;d7d59561a64e:46743 {}] regionserver.LeaseManager(133): Closed leases
2024-11-20T20:39:50,593 INFO [RS:0;d7d59561a64e:46743 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-20T20:39:50,593 INFO [RS:0;d7d59561a64e:46743 {}] hbase.ChoreService(370): Chore service for: regionserver/d7d59561a64e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-20T20:39:50,594 INFO [RS:0;d7d59561a64e:46743 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-20T20:39:50,594 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-20T20:39:50,594 INFO [RS:0;d7d59561a64e:46743 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46743
2024-11-20T20:39:50,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d7d59561a64e,46743,1732135138535
2024-11-20T20:39:50,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T20:39:50,641 INFO [RS:0;d7d59561a64e:46743 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-20T20:39:50,651 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d7d59561a64e,46743,1732135138535]
2024-11-20T20:39:50,662 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d7d59561a64e,46743,1732135138535 already deleted, retry=false
2024-11-20T20:39:50,662 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d7d59561a64e,46743,1732135138535 expired; onlineServers=0
2024-11-20T20:39:50,662 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd7d59561a64e,46491,1732135138378' *****
2024-11-20T20:39:50,662 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-20T20:39:50,662 INFO [M:0;d7d59561a64e:46491 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-20T20:39:50,662 INFO [M:0;d7d59561a64e:46491 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-20T20:39:50,662 DEBUG [M:0;d7d59561a64e:46491 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T20:39:50,662 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-20T20:39:50,662 DEBUG [M:0;d7d59561a64e:46491 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T20:39:50,662 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135138888 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135138888,5,FailOnTimeoutGroup] 2024-11-20T20:39:50,662 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135138888 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135138888,5,FailOnTimeoutGroup] 2024-11-20T20:39:50,662 INFO [M:0;d7d59561a64e:46491 {}] hbase.ChoreService(370): Chore service for: master/d7d59561a64e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T20:39:50,662 INFO [M:0;d7d59561a64e:46491 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:39:50,663 DEBUG [M:0;d7d59561a64e:46491 {}] master.HMaster(1795): Stopping service threads 2024-11-20T20:39:50,663 INFO [M:0;d7d59561a64e:46491 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T20:39:50,663 INFO [M:0;d7d59561a64e:46491 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T20:39:50,663 INFO [M:0;d7d59561a64e:46491 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T20:39:50,663 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-20T20:39:50,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T20:39:50,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:39:50,673 DEBUG [M:0;d7d59561a64e:46491 {}] zookeeper.ZKUtil(347): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T20:39:50,673 WARN [M:0;d7d59561a64e:46491 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T20:39:50,673 INFO [M:0;d7d59561a64e:46491 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/.lastflushedseqids 2024-11-20T20:39:50,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741849_1025 (size=130) 2024-11-20T20:39:50,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741849_1025 (size=130) 2024-11-20T20:39:50,678 INFO [M:0;d7d59561a64e:46491 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T20:39:50,678 INFO [M:0;d7d59561a64e:46491 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T20:39:50,679 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T20:39:50,679 INFO [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:39:50,679 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:39:50,679 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T20:39:50,679 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T20:39:50,679 INFO [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-20T20:39:50,696 DEBUG [M:0;d7d59561a64e:46491 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/563cbf0047f046fbb0a234eb2e13c10a is 82, key is hbase:meta,,1/info:regioninfo/1732135139622/Put/seqid=0 2024-11-20T20:39:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741850_1026 (size=5672) 2024-11-20T20:39:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741850_1026 (size=5672) 2024-11-20T20:39:50,701 INFO [M:0;d7d59561a64e:46491 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/563cbf0047f046fbb0a234eb2e13c10a 2024-11-20T20:39:50,724 DEBUG [M:0;d7d59561a64e:46491 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27d9759fe03a44f18ebaaf12a0f05727 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732135140154/Put/seqid=0 2024-11-20T20:39:50,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741851_1027 (size=7824) 2024-11-20T20:39:50,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741851_1027 (size=7824) 2024-11-20T20:39:50,729 INFO [M:0;d7d59561a64e:46491 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27d9759fe03a44f18ebaaf12a0f05727 2024-11-20T20:39:50,734 INFO [M:0;d7d59561a64e:46491 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 27d9759fe03a44f18ebaaf12a0f05727 2024-11-20T20:39:50,751 DEBUG [M:0;d7d59561a64e:46491 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a60fae3edf441f28345d9100e467814 is 69, key is d7d59561a64e,46743,1732135138535/rs:state/1732135138990/Put/seqid=0 2024-11-20T20:39:50,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:39:50,752 INFO [RS:0;d7d59561a64e:46743 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:39:50,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46743-0x10159f8afe30001, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:39:50,752 INFO [RS:0;d7d59561a64e:46743 
{}] regionserver.HRegionServer(1031): Exiting; stopping=d7d59561a64e,46743,1732135138535; zookeeper connection closed. 2024-11-20T20:39:50,752 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@27ccac97 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@27ccac97 2024-11-20T20:39:50,752 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T20:39:50,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741852_1028 (size=5156) 2024-11-20T20:39:50,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741852_1028 (size=5156) 2024-11-20T20:39:50,756 INFO [M:0;d7d59561a64e:46491 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a60fae3edf441f28345d9100e467814 2024-11-20T20:39:50,779 DEBUG [M:0;d7d59561a64e:46491 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ed381b170fa48b2bfe8e9e9692d9670 is 52, key is load_balancer_on/state:d/1732135139769/Put/seqid=0 2024-11-20T20:39:50,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741853_1029 (size=5056) 2024-11-20T20:39:50,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741853_1029 (size=5056) 2024-11-20T20:39:50,784 INFO [M:0;d7d59561a64e:46491 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ed381b170fa48b2bfe8e9e9692d9670 2024-11-20T20:39:50,789 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/563cbf0047f046fbb0a234eb2e13c10a as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/563cbf0047f046fbb0a234eb2e13c10a 2024-11-20T20:39:50,794 INFO [M:0;d7d59561a64e:46491 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/563cbf0047f046fbb0a234eb2e13c10a, entries=8, sequenceid=121, filesize=5.5 K 2024-11-20T20:39:50,795 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27d9759fe03a44f18ebaaf12a0f05727 as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/27d9759fe03a44f18ebaaf12a0f05727 
2024-11-20T20:39:50,799 INFO [M:0;d7d59561a64e:46491 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 27d9759fe03a44f18ebaaf12a0f05727 2024-11-20T20:39:50,800 INFO [M:0;d7d59561a64e:46491 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/27d9759fe03a44f18ebaaf12a0f05727, entries=14, sequenceid=121, filesize=7.6 K 2024-11-20T20:39:50,801 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a60fae3edf441f28345d9100e467814 as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a60fae3edf441f28345d9100e467814 2024-11-20T20:39:50,805 INFO [M:0;d7d59561a64e:46491 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a60fae3edf441f28345d9100e467814, entries=1, sequenceid=121, filesize=5.0 K 2024-11-20T20:39:50,806 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ed381b170fa48b2bfe8e9e9692d9670 as hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1ed381b170fa48b2bfe8e9e9692d9670 2024-11-20T20:39:50,811 INFO [M:0;d7d59561a64e:46491 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38001/user/jenkins/test-data/82a8974f-398f-b23c-8119-7666ea68b23f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1ed381b170fa48b2bfe8e9e9692d9670, entries=1, sequenceid=121, filesize=4.9 K 2024-11-20T20:39:50,813 INFO [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=121, compaction requested=false 2024-11-20T20:39:50,820 INFO [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:39:50,820 DEBUG [M:0;d7d59561a64e:46491 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135190679Disabling compacts and flushes for region at 1732135190679Disabling writes for close at 1732135190679Obtaining lock to block concurrent updates at 1732135190679Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732135190679Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1732135190679Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732135190680 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732135190680Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732135190695 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732135190696 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732135190706 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732135190723 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732135190723Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732135190734 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732135190750 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732135190750Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732135190760 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732135190778 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732135190778Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20742ed2: reopening flushed file at 1732135190788 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@688302c: reopening flushed file at 1732135190794 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c909453: reopening flushed file at 1732135190800 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c475dd6: reopening flushed file at 1732135190806 (+6 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=121, compaction requested=false at 1732135190813 (+7 ms)Writing region close event to WAL at 1732135190820 (+7 ms)Closed at 1732135190820 2024-11-20T20:39:50,820 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,821 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,821 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,821 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,821 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:39:50,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741830_1006 (size=53047) 2024-11-20T20:39:50,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40109 is added to blk_1073741830_1006 (size=53047) 2024-11-20T20:39:50,823 INFO [M:0;d7d59561a64e:46491 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T20:39:50,823 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T20:39:50,824 INFO [M:0;d7d59561a64e:46491 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46491 2024-11-20T20:39:50,824 INFO [M:0;d7d59561a64e:46491 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:39:50,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:39:50,930 INFO [M:0;d7d59561a64e:46491 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:39:50,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46491-0x10159f8afe30000, quorum=127.0.0.1:64569, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:39:50,968 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e17b3bd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:39:50,968 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6677bb9f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:39:50,969 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:39:50,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fdf76c0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:39:50,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@755885a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir/,STOPPED} 2024-11-20T20:39:50,970 WARN [BP-1289768345-172.17.0.2-1732135135850 heartbeating to localhost/127.0.0.1:38001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:39:50,970 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T20:39:50,970 WARN [BP-1289768345-172.17.0.2-1732135135850 heartbeating to localhost/127.0.0.1:38001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1289768345-172.17.0.2-1732135135850 (Datanode Uuid c5a7bb34-d76f-4475-8004-63606f7ef063) service to localhost/127.0.0.1:38001 2024-11-20T20:39:50,970 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T20:39:50,971 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/data/data3/current/BP-1289768345-172.17.0.2-1732135135850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:39:50,971 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/data/data4/current/BP-1289768345-172.17.0.2-1732135135850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:39:50,971 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:39:50,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59c2bf8c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:39:50,974 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3cd873c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:39:50,974 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:39:50,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60881688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:39:50,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a64c23e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir/,STOPPED} 2024-11-20T20:39:50,976 WARN [BP-1289768345-172.17.0.2-1732135135850 heartbeating to localhost/127.0.0.1:38001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:39:50,976 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T20:39:50,976 WARN [BP-1289768345-172.17.0.2-1732135135850 heartbeating to localhost/127.0.0.1:38001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1289768345-172.17.0.2-1732135135850 (Datanode Uuid 2760e3b2-f873-46ef-a501-75e5ae179e75) service to localhost/127.0.0.1:38001 2024-11-20T20:39:50,976 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T20:39:50,976 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/data/data1/current/BP-1289768345-172.17.0.2-1732135135850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:39:50,976 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/cluster_d068c219-f6ae-5ea6-5ef9-3ab5caa1f048/data/data2/current/BP-1289768345-172.17.0.2-1732135135850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:39:50,977 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:39:50,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f033f5b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T20:39:50,983 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26bf8cb0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:39:50,983 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:39:50,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@568dc701{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:39:50,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ff7df62{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir/,STOPPED} 2024-11-20T20:39:50,989 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T20:39:51,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T20:39:51,013 INFO [regionserver/d7d59561a64e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:39:51,018 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=212 (was 185) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38001 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38001 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:38001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38001 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=246 (was 280), ProcessCount=11 (was 11), AvailableMemoryMB=730 (was 891) 2024-11-20T20:39:51,026 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=211, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=246, ProcessCount=11, AvailableMemoryMB=730 2024-11-20T20:39:51,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T20:39:51,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.log.dir so I do NOT create it in target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/eac8b6fc-bada-55ad-bf83-a1df72afdbc4/hadoop.tmp.dir so I do NOT create it in target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f, deleteOnExit=true 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/test.cache.data in system properties and HBase conf 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.log.dir in system properties and HBase conf 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T20:39:51,027 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-20T20:39:51,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T20:39:51,028 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/nfs.dump.dir in system properties and HBase conf
2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/java.io.tmpdir in system properties and HBase conf
2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-20T20:39:51,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-20T20:39:51,041 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:39:51,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:51,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:51,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:39:51,420 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:39:51,421 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:39:51,421 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:39:51,421 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:39:51,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:39:51,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8d81aab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:39:51,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@552872f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:39:51,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:51,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d701b2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/java.io.tmpdir/jetty-localhost-35439-hadoop-hdfs-3_4_1-tests_jar-_-any-13718686780342415467/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:39:51,528 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6150e164{HTTP/1.1, (http/1.1)}{localhost:35439}
2024-11-20T20:39:51,528 INFO [Time-limited test {}] server.Server(415): Started @265234ms
2024-11-20T20:39:51,539 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:39:51,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:51,772 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:39:51,776 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:39:51,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:39:51,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:39:51,777 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:39:51,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@195069a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:39:51,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a67f565{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:39:51,882 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b4813f1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/java.io.tmpdir/jetty-localhost-35105-hadoop-hdfs-3_4_1-tests_jar-_-any-4185382424248661052/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:39:51,882 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ef16bcf{HTTP/1.1, (http/1.1)}{localhost:35105}
2024-11-20T20:39:51,882 INFO [Time-limited test {}] server.Server(415): Started @265589ms
2024-11-20T20:39:51,883 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:39:51,929 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:39:51,931 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:39:51,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:39:51,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:39:51,932 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:39:51,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@248d74c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:39:51,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65f8aed6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:39:52,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43273902{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/java.io.tmpdir/jetty-localhost-43899-hadoop-hdfs-3_4_1-tests_jar-_-any-8854579702568386843/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:39:52,040 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51d47a1d{HTTP/1.1, (http/1.1)}{localhost:43899}
2024-11-20T20:39:52,040 INFO [Time-limited test {}] server.Server(415): Started @265747ms
2024-11-20T20:39:52,041 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:39:52,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:52,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:52,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:52,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:53,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:53,061 WARN [Thread-1971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/data/data1/current/BP-1587900785-172.17.0.2-1732135191054/current, will proceed with Du for space computation calculation,
2024-11-20T20:39:53,062 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/data/data2/current/BP-1587900785-172.17.0.2-1732135191054/current, will proceed with Du for space computation calculation,
2024-11-20T20:39:53,088 WARN [Thread-1935 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:39:53,090 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x378e7087e8529d65 with lease ID 0x70dcde4fa6bd90cf: Processing first storage report for DS-42a1dab6-edd1-4c46-b8f0-2f78920a7878 from datanode DatanodeRegistration(127.0.0.1:45765, datanodeUuid=ddb82209-bcfa-4a98-8282-cb9ecdb9474f, infoPort=40741, infoSecurePort=0, ipcPort=41601, storageInfo=lv=-57;cid=testClusterID;nsid=4852747;c=1732135191054)
2024-11-20T20:39:53,090 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x378e7087e8529d65 with lease ID 0x70dcde4fa6bd90cf: from storage DS-42a1dab6-edd1-4c46-b8f0-2f78920a7878 node DatanodeRegistration(127.0.0.1:45765, datanodeUuid=ddb82209-bcfa-4a98-8282-cb9ecdb9474f, infoPort=40741, infoSecurePort=0, ipcPort=41601, storageInfo=lv=-57;cid=testClusterID;nsid=4852747;c=1732135191054), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-20T20:39:53,090 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x378e7087e8529d65 with lease ID 0x70dcde4fa6bd90cf: Processing first storage report for DS-820f76df-66d7-41cd-997c-280db223618f from datanode DatanodeRegistration(127.0.0.1:45765, datanodeUuid=ddb82209-bcfa-4a98-8282-cb9ecdb9474f, infoPort=40741, infoSecurePort=0, ipcPort=41601, storageInfo=lv=-57;cid=testClusterID;nsid=4852747;c=1732135191054)
2024-11-20T20:39:53,090 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x378e7087e8529d65 with lease ID 0x70dcde4fa6bd90cf: from storage DS-820f76df-66d7-41cd-997c-280db223618f node DatanodeRegistration(127.0.0.1:45765, datanodeUuid=ddb82209-bcfa-4a98-8282-cb9ecdb9474f, infoPort=40741, infoSecurePort=0, ipcPort=41601, storageInfo=lv=-57;cid=testClusterID;nsid=4852747;c=1732135191054), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:39:53,195 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/data/data3/current/BP-1587900785-172.17.0.2-1732135191054/current, will proceed with Du for space computation calculation,
2024-11-20T20:39:53,195 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/data/data4/current/BP-1587900785-172.17.0.2-1732135191054/current, will proceed with Du for space computation calculation,
2024-11-20T20:39:53,215 WARN [Thread-1958 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:39:53,217 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92cc422841c32d16 with lease ID 0x70dcde4fa6bd90d0: Processing first storage report for DS-1a0236ec-534c-4cca-8a45-a37b1e3677ce from datanode DatanodeRegistration(127.0.0.1:40355, datanodeUuid=27169cd1-b803-48b7-8a75-f42c59c8197a, infoPort=37349, infoSecurePort=0, ipcPort=37117, storageInfo=lv=-57;cid=testClusterID;nsid=4852747;c=1732135191054)
2024-11-20T20:39:53,217 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92cc422841c32d16 with lease ID 0x70dcde4fa6bd90d0: from storage DS-1a0236ec-534c-4cca-8a45-a37b1e3677ce node DatanodeRegistration(127.0.0.1:40355, datanodeUuid=27169cd1-b803-48b7-8a75-f42c59c8197a, infoPort=37349, infoSecurePort=0, ipcPort=37117, storageInfo=lv=-57;cid=testClusterID;nsid=4852747;c=1732135191054), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:39:53,217 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92cc422841c32d16 with lease ID 0x70dcde4fa6bd90d0: Processing first storage report for DS-482eb504-0151-4730-ae15-47e1e57dc970 from datanode DatanodeRegistration(127.0.0.1:40355, datanodeUuid=27169cd1-b803-48b7-8a75-f42c59c8197a, infoPort=37349, infoSecurePort=0, ipcPort=37117, storageInfo=lv=-57;cid=testClusterID;nsid=4852747;c=1732135191054)
2024-11-20T20:39:53,217 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92cc422841c32d16 with lease ID 0x70dcde4fa6bd90d0: from storage DS-482eb504-0151-4730-ae15-47e1e57dc970 node DatanodeRegistration(127.0.0.1:40355, datanodeUuid=27169cd1-b803-48b7-8a75-f42c59c8197a, infoPort=37349, infoSecurePort=0, ipcPort=37117, storageInfo=lv=-57;cid=testClusterID;nsid=4852747;c=1732135191054), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:39:53,270 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366
2024-11-20T20:39:53,272 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/zookeeper_0, clientPort=57606, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-20T20:39:53,273 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57606
2024-11-20T20:39:53,273 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:39:53,275 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:39:53,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:39:53,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:39:53,292 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d with version=8
2024-11-20T20:39:53,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase-staging
2024-11-20T20:39:53,295 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:39:53,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:39:53,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:39:53,295 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:39:53,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:39:53,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:39:53,295 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-20T20:39:53,295 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:39:53,296 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41021
2024-11-20T20:39:53,297 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41021 connecting to ZooKeeper ensemble=127.0.0.1:57606
2024-11-20T20:39:53,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:410210x0, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:39:53,347 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41021-0x10159f986630000 connected
2024-11-20T20:39:53,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:53,431 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:39:53,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:39:53,435 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:39:53,436 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d, hbase.cluster.distributed=false
2024-11-20T20:39:53,438 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:39:53,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41021
2024-11-20T20:39:53,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41021
2024-11-20T20:39:53,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41021
2024-11-20T20:39:53,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41021
2024-11-20T20:39:53,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41021
2024-11-20T20:39:53,458 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:39:53,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:39:53,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:39:53,458 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:39:53,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:39:53,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:39:53,458 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T20:39:53,458 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:39:53,459 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39133
2024-11-20T20:39:53,460 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39133 connecting to ZooKeeper ensemble=127.0.0.1:57606
2024-11-20T20:39:53,461 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:39:53,462 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:39:53,472 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391330x0, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:39:53,473 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:39:53,473 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39133-0x10159f986630001 connected
2024-11-20T20:39:53,473 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-20T20:39:53,474 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-20T20:39:53,474 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T20:39:53,475 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:39:53,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39133
2024-11-20T20:39:53,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39133
2024-11-20T20:39:53,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39133
2024-11-20T20:39:53,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39133
2024-11-20T20:39:53,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39133
2024-11-20T20:39:53,489 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d7d59561a64e:41021
2024-11-20T20:39:53,489 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d7d59561a64e,41021,1732135193294
2024-11-20T20:39:53,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:39:53,500 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:39:53,501 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d7d59561a64e,41021,1732135193294
2024-11-20T20:39:53,511 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-20T20:39:53,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:39:53,511 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:39:53,511 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T20:39:53,512 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d7d59561a64e,41021,1732135193294 from backup master directory
2024-11-20T20:39:53,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d7d59561a64e,41021,1732135193294
2024-11-20T20:39:53,521 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:39:53,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T20:39:53,521 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T20:39:53,522 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d7d59561a64e,41021,1732135193294
2024-11-20T20:39:53,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:53,526 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/hbase.id] with ID: 20bd1667-919d-44ff-a018-43ca623c19d8
2024-11-20T20:39:53,526 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/.tmp/hbase.id
2024-11-20T20:39:53,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741826_1002 (size=42)
2024-11-20T20:39:53,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741826_1002 (size=42)
2024-11-20T20:39:53,533 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/.tmp/hbase.id]:[hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/hbase.id]
2024-11-20T20:39:53,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:53,546 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:39:53,546 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-20T20:39:53,547 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
2024-11-20T20:39:53,556 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:39:53,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:39:53,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:39:53,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741827_1003 (size=196)
2024-11-20T20:39:53,564 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T20:39:53,565 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-20T20:39:53,565 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:39:53,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:39:53,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741828_1004 (size=1189)
2024-11-20T20:39:53,574 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store
2024-11-20T20:39:53,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:39:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741829_1005 (size=34)
2024-11-20T20:39:53,585 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:39:53,585 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T20:39:53,585 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:39:53,586 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:39:53,586 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T20:39:53,586 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:39:53,586 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T20:39:53,586 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135193585Disabling compacts and flushes for region at 1732135193585Disabling writes for close at 1732135193586 (+1 ms)Writing region close event to WAL at 1732135193586Closed at 1732135193586 2024-11-20T20:39:53,587 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/.initializing 2024-11-20T20:39:53,587 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/WALs/d7d59561a64e,41021,1732135193294 2024-11-20T20:39:53,589 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C41021%2C1732135193294, suffix=, logDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/WALs/d7d59561a64e,41021,1732135193294, archiveDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/oldWALs, maxLogs=10 2024-11-20T20:39:53,589 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C41021%2C1732135193294.1732135193589 2024-11-20T20:39:53,593 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/WALs/d7d59561a64e,41021,1732135193294/d7d59561a64e%2C41021%2C1732135193294.1732135193589 2024-11-20T20:39:53,594 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:37349:37349)] 2024-11-20T20:39:53,594 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T20:39:53,595 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:39:53,595 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,595 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T20:39:53,597 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:39:53,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T20:39:53,599 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:39:53,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T20:39:53,600 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:39:53,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T20:39:53,602 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:39:53,603 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,603 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,604 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,605 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,605 DEBUG [master/d7d59561a64e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,605 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T20:39:53,606 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:39:53,608 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:39:53,609 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729406, jitterRate=-0.07251298427581787}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T20:39:53,609 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732135193595Initializing all the Stores at 1732135193596 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135193596Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135193596Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135193596Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135193596Cleaning up temporary data from old regions at 1732135193605 (+9 ms)Region opened successfully at 1732135193609 (+4 ms) 2024-11-20T20:39:53,610 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T20:39:53,613 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21049d44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0 2024-11-20T20:39:53,613 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T20:39:53,614 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T20:39:53,614 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T20:39:53,614 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T20:39:53,636 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 22 msec 2024-11-20T20:39:53,637 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T20:39:53,637 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T20:39:53,639 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T20:39:53,640 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T20:39:53,651 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T20:39:53,652 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T20:39:53,652 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T20:39:53,662 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T20:39:53,662 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T20:39:53,663 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T20:39:53,672 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T20:39:53,673 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T20:39:53,683 DEBUG 
[master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T20:39:53,685 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T20:39:53,693 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T20:39:53,704 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:39:53,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:39:53,704 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:39:53,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:39:53,705 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d7d59561a64e,41021,1732135193294, sessionid=0x10159f986630000, setting cluster-up flag (Was=false) 2024-11-20T20:39:53,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:39:53,725 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:39:53,756 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T20:39:53,758 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,41021,1732135193294 2024-11-20T20:39:53,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:39:53,778 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:39:53,809 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T20:39:53,811 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,41021,1732135193294 2024-11-20T20:39:53,813 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T20:39:53,815 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T20:39:53,815 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T20:39:53,815 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T20:39:53,816 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d7d59561a64e,41021,1732135193294 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T20:39:53,817 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:39:53,817 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:39:53,817 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:39:53,817 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:39:53,817 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d7d59561a64e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T20:39:53,817 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:53,817 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:39:53,817 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T20:39:53,821 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732135223821 2024-11-20T20:39:53,821 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T20:39:53,821 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T20:39:53,821 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T20:39:53,821 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T20:39:53,821 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T20:39:53,821 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T20:39:53,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:39:53,822 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:39:53,822 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T20:39:53,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T20:39:53,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T20:39:53,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T20:39:53,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T20:39:53,822 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T20:39:53,822 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135193822,5,FailOnTimeoutGroup] 2024-11-20T20:39:53,823 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135193823,5,FailOnTimeoutGroup] 2024-11-20T20:39:53,823 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:39:53,823 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T20:39:53,823 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T20:39:53,823 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,823 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T20:39:53,823 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T20:39:53,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:39:53,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:39:53,830 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T20:39:53,830 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d 2024-11-20T20:39:53,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741832_1008 (size=32) 2024-11-20T20:39:53,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741832_1008 (size=32) 2024-11-20T20:39:53,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:39:53,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T20:39:53,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T20:39:53,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:39:53,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T20:39:53,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T20:39:53,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:39:53,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T20:39:53,841 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T20:39:53,841 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:39:53,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T20:39:53,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T20:39:53,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:53,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:39:53,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T20:39:53,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740 2024-11-20T20:39:53,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740 2024-11-20T20:39:53,845 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T20:39:53,845 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T20:39:53,845 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
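[Annotator note] The two FlushLargeStoresPolicy lines above (32.0 M for master:store, 16.0 M for hbase:meta) are the documented fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor, the per-family lower bound is the region's memstore flush size divided by the number of column families. A sketch of that arithmetic; the master:store flush size is logged directly (flushSize=134217728), and the hbase:meta flush size is inferred from the flushSizeLowerBound=16777216 printed just below times its 4 families:

  public class FlushLowerBoundSketch {
    public static void main(String[] args) {
      long masterStoreFlushSize = 134_217_728L; // logged for master:store (128 MB)
      long metaFlushSize = 67_108_864L;         // implied for hbase:meta (4 * 16777216)
      int families = 4;                         // info/proc/rs/state, resp. info/ns/rep_barrier/table
      System.out.println(masterStoreFlushSize / families); // 33554432 -> the logged "32.0 M"
      System.out.println(metaFlushSize / families);        // 16777216 -> the logged "16.0 M"
    }
  }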
2024-11-20T20:39:53,846 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T20:39:53,849 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:39:53,850 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803574, jitterRate=0.021797940135002136}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T20:39:53,850 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732135193836Initializing all the Stores at 1732135193836Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135193836Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135193836Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135193836Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135193836Cleaning up temporary data from old regions at 1732135193845 (+9 ms)Region opened successfully at 1732135193850 (+5 ms) 2024-11-20T20:39:53,850 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T20:39:53,851 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T20:39:53,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T20:39:53,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T20:39:53,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T20:39:53,851 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T20:39:53,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135193850Disabling compacts and flushes for region at 1732135193850Disabling writes for close at 1732135193851 (+1 ms)Writing region close 
event to WAL at 1732135193851Closed at 1732135193851 2024-11-20T20:39:53,853 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:39:53,853 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T20:39:53,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T20:39:53,854 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T20:39:53,855 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T20:39:53,879 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(746): ClusterId : 20bd1667-919d-44ff-a018-43ca623c19d8 2024-11-20T20:39:53,879 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T20:39:53,943 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T20:39:53,943 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T20:39:54,006 WARN [d7d59561a64e:41021 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T20:39:54,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:54,059 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T20:39:54,060 DEBUG [RS:0;d7d59561a64e:39133 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ba86542, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0 2024-11-20T20:39:54,075 DEBUG [RS:0;d7d59561a64e:39133 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d7d59561a64e:39133 2024-11-20T20:39:54,075 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T20:39:54,075 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T20:39:54,075 DEBUG [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(832): About to register with Master. 
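[Annotator note] The 'Failed invocation' WARN above comes from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed reflectively; the InvocationTargetException only wraps the real cause, IOException: Filesystem closed, i.e. the DFSClient behind that path (port 44749, not the active 43361) was already shut down. A hedged sketch of such a reflective probe and the unwrapping; the helper name and fallback behavior are illustrative, not the exact HBase source:

  import java.io.IOException;
  import java.lang.reflect.InvocationTargetException;
  import java.lang.reflect.Method;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class IsFileClosedProbeSketch {
    // Returns true if the filesystem reports the file as closed,
    // false if the method is missing or the underlying call failed.
    static boolean isFileClosed(FileSystem fs, Path p) {
      try {
        Method m = fs.getClass().getMethod("isFileClosed", Path.class);
        return (Boolean) m.invoke(fs, p);
      } catch (NoSuchMethodException | IllegalAccessException e) {
        return false; // not a DistributedFileSystem, or not accessible
      } catch (InvocationTargetException e) {
        // The WARN prints this wrapper; the real failure is the cause,
        // here IOException("Filesystem closed") from a shut-down DFSClient.
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
          return false;
        }
        throw new RuntimeException(cause);
      }
    }
  }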
2024-11-20T20:39:54,076 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(2659): reportForDuty to master=d7d59561a64e,41021,1732135193294 with port=39133, startcode=1732135193457 2024-11-20T20:39:54,076 DEBUG [RS:0;d7d59561a64e:39133 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T20:39:54,078 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44275, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T20:39:54,079 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41021 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d7d59561a64e,39133,1732135193457 2024-11-20T20:39:54,079 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41021 {}] master.ServerManager(517): Registering regionserver=d7d59561a64e,39133,1732135193457 2024-11-20T20:39:54,080 DEBUG [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d 2024-11-20T20:39:54,080 DEBUG [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43361 2024-11-20T20:39:54,080 DEBUG [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T20:39:54,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:39:54,164 DEBUG [RS:0;d7d59561a64e:39133 {}] zookeeper.ZKUtil(111): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d7d59561a64e,39133,1732135193457 2024-11-20T20:39:54,165 WARN [RS:0;d7d59561a64e:39133 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T20:39:54,165 INFO [RS:0;d7d59561a64e:39133 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:39:54,165 DEBUG [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457 2024-11-20T20:39:54,165 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d7d59561a64e,39133,1732135193457] 2024-11-20T20:39:54,169 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T20:39:54,171 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T20:39:54,171 INFO [RS:0;d7d59561a64e:39133 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T20:39:54,172 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
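[Annotator note] The MemStoreFlusher line above (globalMemStoreLimit=880 M, lowMark=836 M) reflects the stock 95% lower-water mark: hbase.regionserver.global.memstore.size fixes the limit as a fraction of heap, and hbase.regionserver.global.memstore.size.lower.limit (default 0.95) scales it down to the flush low mark. A sketch of the arithmetic, assuming that default:

  public class MemStoreLimitSketch {
    public static void main(String[] args) {
      long globalLimit = 880L * 1024 * 1024;  // logged globalMemStoreLimit
      double lowerLimitFraction = 0.95;       // assumed default lower.limit
      long lowMark = (long) (globalLimit * lowerLimitFraction);
      System.out.println(lowMark / (1024 * 1024) + " M"); // 836 M, matching the log
    }
  }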
2024-11-20T20:39:54,172 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T20:39:54,173 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T20:39:54,173 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T20:39:54,173 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,173 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,173 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,173 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,173 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,173 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:39:54,173 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,174 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,174 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,174 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,174 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,174 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:39:54,174 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T20:39:54,174 DEBUG [RS:0;d7d59561a64e:39133 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T20:39:54,174 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
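[Annotator note] Every 'Chore ScheduledChore name=... is enabled' line above goes through the same ChoreService scheduler. A minimal, hypothetical chore wired up with the public ChoreService/ScheduledChore API; the chore name, period, and body are invented for illustration (the 1000 ms period echoes the CompactionChecker above):

  import org.apache.hadoop.hbase.ChoreService;
  import org.apache.hadoop.hbase.ScheduledChore;
  import org.apache.hadoop.hbase.Stoppable;

  public class ChoreSketch {
    public static void main(String[] args) {
      Stoppable stopper = new Stoppable() {
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
      };
      ChoreService service = new ChoreService("demo");
      // Invokes chore() once per second until the stopper is tripped.
      service.scheduleChore(new ScheduledChore("demo-chore", stopper, 1000) {
        @Override protected void chore() {
          System.out.println("periodic work");
        }
      });
    }
  }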
2024-11-20T20:39:54,175 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,175 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,175 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,175 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,175 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39133,1732135193457-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T20:39:54,195 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-20T20:39:54,195 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,39133,1732135193457-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,195 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,196 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.Replication(171): d7d59561a64e,39133,1732135193457 started
2024-11-20T20:39:54,211 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,211 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1482): Serving as d7d59561a64e,39133,1732135193457, RpcServer on d7d59561a64e/172.17.0.2:39133, sessionid=0x10159f986630001
2024-11-20T20:39:54,211 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-20T20:39:54,211 DEBUG [RS:0;d7d59561a64e:39133 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d7d59561a64e,39133,1732135193457
2024-11-20T20:39:54,211 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,39133,1732135193457'
2024-11-20T20:39:54,211 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-20T20:39:54,212 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-20T20:39:54,212 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-20T20:39:54,212 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-20T20:39:54,212 DEBUG [RS:0;d7d59561a64e:39133 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d7d59561a64e,39133,1732135193457
2024-11-20T20:39:54,212 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,39133,1732135193457'
2024-11-20T20:39:54,212 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-20T20:39:54,212 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-20T20:39:54,213 DEBUG [RS:0;d7d59561a64e:39133 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-20T20:39:54,213 INFO [RS:0;d7d59561a64e:39133 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-20T20:39:54,213 INFO [RS:0;d7d59561a64e:39133 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-20T20:39:54,315 INFO [RS:0;d7d59561a64e:39133 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C39133%2C1732135193457, suffix=, logDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457, archiveDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/oldWALs, maxLogs=32
2024-11-20T20:39:54,316 INFO [RS:0;d7d59561a64e:39133 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39133%2C1732135193457.1732135194316
2024-11-20T20:39:54,329 INFO [RS:0;d7d59561a64e:39133 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135194316
2024-11-20T20:39:54,333 DEBUG [RS:0;d7d59561a64e:39133 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:37349:37349)]
2024-11-20T20:39:54,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:54,506 DEBUG [d7d59561a64e:41021 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-20T20:39:54,507 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d7d59561a64e,39133,1732135193457
2024-11-20T20:39:54,508 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,39133,1732135193457, state=OPENING
2024-11-20T20:39:54,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:54,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:54,609 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-20T20:39:54,620 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:39:54,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:39:54,620 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:39:54,620 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-20T20:39:54,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,39133,1732135193457}]
2024-11-20T20:39:54,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:39:54,774 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-20T20:39:54,776 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44351, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-20T20:39:54,780 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-20T20:39:54,780 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:39:54,782 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C39133%2C1732135193457.meta, suffix=.meta, logDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457, archiveDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/oldWALs, maxLogs=32
2024-11-20T20:39:54,782 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39133%2C1732135193457.meta.1732135194782.meta
2024-11-20T20:39:54,787 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.meta.1732135194782.meta
2024-11-20T20:39:54,788 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:37349:37349)]
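The recurring "Failed invocation" WARNs above appear to be cleanup noise from an earlier minicluster (paths point at localhost:44749 and the old ce6c8851 test-data directory, not the new cluster on 43361), and the cause is a DFSClient that was already shut down. The frames show why the error takes this shape: RecoverLeaseFSUtils probes DistributedFileSystem#isFileClosed through java.lang.reflect.Method.invoke, so the checked "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException whose own message is null. A minimal, self-contained Java sketch of just that wrapping behavior (FakeDfs and the path are invented for illustration; this is not HBase's actual code):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveProbeSketch {
      // Stand-in for a DFS client whose open-check fails after close().
      public static class FakeDfs {
        public boolean isFileClosed(String path) throws IOException {
          throw new IOException("Filesystem closed");
        }
      }

      public static void main(String[] args) throws Exception {
        Method probe = FakeDfs.class.getMethod("isFileClosed", String.class);
        try {
          probe.invoke(new FakeDfs(), "/user/jenkins/some-wal");
        } catch (InvocationTargetException e) {
          // Prints "null" for the wrapper and the IOException as the cause,
          // matching the log's "InvocationTargetException: null ...
          // Caused by: java.io.IOException: Filesystem closed".
          System.out.println("wrapper message: " + e.getMessage());
          System.out.println("cause: " + e.getCause());
        }
      }
    }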
2024-11-20T20:39:54,792 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:39:54,793 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-20T20:39:54,793 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-20T20:39:54,793 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-20T20:39:54,793 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-20T20:39:54,793 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:39:54,793 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-20T20:39:54,793 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-20T20:39:54,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-20T20:39:54,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-20T20:39:54,795 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:39:54,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:39:54,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-20T20:39:54,796 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-20T20:39:54,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:39:54,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:39:54,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-20T20:39:54,797 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-20T20:39:54,797 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:39:54,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:39:54,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-20T20:39:54,798 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-20T20:39:54,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:39:54,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:39:54,799 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-20T20:39:54,800 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740
2024-11-20T20:39:54,801 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740
2024-11-20T20:39:54,802 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-20T20:39:54,802 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-20T20:39:54,802 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
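For reference, the fallback arithmetic in the FlushLargeStoresPolicy line above: hbase:meta carries four column families (info, ns, rep_barrier, table), so the printed per-family lower bound of 16.0 M is the region's memstore flush heap size divided by the family count, 64 MB / 4 = 16 MB = 16777216 bytes, which matches the FlushLargeStoresPolicy{flushSizeLowerBound=16777216} reported when the region opens just below. (The 64 MB total is inferred from these two numbers, not stated directly in the log.)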
2024-11-20T20:39:54,804 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-20T20:39:54,804 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766633, jitterRate=-0.025176629424095154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-20T20:39:54,804 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-20T20:39:54,805 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732135194793Writing region info on filesystem at 1732135194793Initializing all the Stores at 1732135194794 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135194794Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135194794Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135194794Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135194794Cleaning up temporary data from old regions at 1732135194802 (+8 ms)Running coprocessor post-open hooks at 1732135194804 (+2 ms)Region opened successfully at 1732135194805 (+1 ms)
2024-11-20T20:39:54,806 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732135194774
2024-11-20T20:39:54,808 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-20T20:39:54,808 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-20T20:39:54,809 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,39133,1732135193457
2024-11-20T20:39:54,810 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,39133,1732135193457, state=OPEN
2024-11-20T20:39:54,848 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-20T20:39:54,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-20T20:39:54,848 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d7d59561a64e,39133,1732135193457
2024-11-20T20:39:54,848 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:39:54,848 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:39:54,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-20T20:39:54,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,39133,1732135193457 in 228 msec
2024-11-20T20:39:54,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-20T20:39:54,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 998 msec
2024-11-20T20:39:54,853 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-20T20:39:54,853 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-20T20:39:54,855 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-20T20:39:54,855 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,39133,1732135193457, seqNum=-1]
2024-11-20T20:39:54,855 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-20T20:39:54,856 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45281, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-20T20:39:54,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0470 sec
2024-11-20T20:39:54,863 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732135194862, completionTime=-1
2024-11-20T20:39:54,863 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-20T20:39:54,863 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster...
2024-11-20T20:39:54,865 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1
2024-11-20T20:39:54,865 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732135254865
2024-11-20T20:39:54,865 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732135314865
2024-11-20T20:39:54,865 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec
2024-11-20T20:39:54,865 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,41021,1732135193294-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,865 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,41021,1732135193294-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,865 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,41021,1732135193294-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,866 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d7d59561a64e:41021, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,866 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,866 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,868 DEBUG [master/d7d59561a64e:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-20T20:39:54,869 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.347sec
2024-11-20T20:39:54,870 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-20T20:39:54,870 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-20T20:39:54,870 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-20T20:39:54,870 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-20T20:39:54,870 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-20T20:39:54,870 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,41021,1732135193294-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T20:39:54,870 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,41021,1732135193294-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-20T20:39:54,872 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-20T20:39:54,872 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-20T20:39:54,872 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,41021,1732135193294-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:39:54,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@205ee7b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:39:54,880 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d7d59561a64e,41021,-1 for getting cluster id
2024-11-20T20:39:54,880 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-20T20:39:54,881 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '20bd1667-919d-44ff-a018-43ca623c19d8'
2024-11-20T20:39:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-20T20:39:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "20bd1667-919d-44ff-a018-43ca623c19d8"
2024-11-20T20:39:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@754209db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:39:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d7d59561a64e,41021,-1]
2024-11-20T20:39:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-20T20:39:54,883 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:39:54,884 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49344, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-20T20:39:54,885 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62921d24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T20:39:54,885 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-20T20:39:54,886 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,39133,1732135193457, seqNum=-1]
2024-11-20T20:39:54,886 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-20T20:39:54,888 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50792, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-20T20:39:54,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d7d59561a64e,41021,1732135193294
2024-11-20T20:39:54,890 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:39:54,893 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-20T20:39:54,893 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-20T20:39:54,894 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is d7d59561a64e,41021,1732135193294
2024-11-20T20:39:54,894 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3346153c
2024-11-20T20:39:54,894 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-20T20:39:54,896 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49358, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-20T20:39:54,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41021 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-20T20:39:54,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41021 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
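The two TableDescriptorChecker WARNs above are expected here: the test requests a deliberately tiny MAX_FILESIZE (786432 bytes, i.e. 768 KB) and MEMSTORE_FLUSHSIZE (8192 bytes, i.e. 8 KB) so that flushes, WAL rolls, and splits happen quickly. A hedged sketch of building such a descriptor with the standard HBase client builder API (the two values come from the log; the code itself is illustrative, not the actual test source):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TinyTableSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432L)      // 768 KB: triggers the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L)  // 8 KB: triggers the MEMSTORE_FLUSHSIZE warning
            .build();
        System.out.println(td);
      }
    }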
2024-11-20T20:39:54,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41021 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T20:39:54,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41021 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling
2024-11-20T20:39:54,899 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-11-20T20:39:54,899 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:39:54,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41021 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4
2024-11-20T20:39:54,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T20:39:54,901 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-20T20:39:54,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741835_1011 (size=381)
2024-11-20T20:39:54,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741835_1011 (size=381)
2024-11-20T20:39:54,909 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b0aa728e838aa4bdcbfc7aeb3ad338bd, NAME => 'TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d
2024-11-20T20:39:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741836_1012 (size=64)
2024-11-20T20:39:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741836_1012 (size=64)
2024-11-20T20:39:54,924 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:39:54,924 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing b0aa728e838aa4bdcbfc7aeb3ad338bd, disabling compactions & flushes
2024-11-20T20:39:54,924 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:39:54,924 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:39:54,924 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd. after waiting 0 ms
2024-11-20T20:39:54,924 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:39:54,924 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:39:54,924 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for b0aa728e838aa4bdcbfc7aeb3ad338bd: Waiting for close lock at 1732135194924Disabling compacts and flushes for region at 1732135194924Disabling writes for close at 1732135194924Writing region close event to WAL at 1732135194924Closed at 1732135194924
2024-11-20T20:39:54,926 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-11-20T20:39:54,926 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732135194926"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732135194926"}]},"ts":"1732135194926"}
2024-11-20T20:39:54,928 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
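The MetaTableAccessor Put above is the whole hbase:meta bookkeeping for the new region: one row keyed by the region name carrying an info:regioninfo column (vlen=63, the serialized RegionInfo) and an info:state column (vlen=6). A rough client-side equivalent for orientation (payloads are placeholders; the real values are binary, and "CLOSED" is only a guess consistent with the 6-byte length shown in the log):

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaPutSketch {
      public static void main(String[] args) {
        Put put = new Put(Bytes.toBytes(
            "TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd."));
        // vlen=63 in the log: the serialized RegionInfo (placeholder bytes here).
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"),
            Bytes.toBytes("<serialized RegionInfo placeholder>"));
        // vlen=6 in the log; the actual state string is not shown, "CLOSED" is an assumption.
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"),
            Bytes.toBytes("CLOSED"));
        System.out.println(put);
      }
    }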
2024-11-20T20:39:54,930 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T20:39:54,930 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732135194930"}]},"ts":"1732135194930"} 2024-11-20T20:39:54,932 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-20T20:39:54,933 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, ASSIGN}] 2024-11-20T20:39:54,934 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, ASSIGN 2024-11-20T20:39:54,935 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, ASSIGN; state=OFFLINE, location=d7d59561a64e,39133,1732135193457; forceNewPlan=false, retain=false 2024-11-20T20:39:55,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:39:55,086 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b0aa728e838aa4bdcbfc7aeb3ad338bd, regionState=OPENING, regionLocation=d7d59561a64e,39133,1732135193457 2024-11-20T20:39:55,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, ASSIGN because future has completed 2024-11-20T20:39:55,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b0aa728e838aa4bdcbfc7aeb3ad338bd, server=d7d59561a64e,39133,1732135193457}] 2024-11-20T20:39:55,246 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd. 2024-11-20T20:39:55,247 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b0aa728e838aa4bdcbfc7aeb3ad338bd, NAME => 'TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.', STARTKEY => '', ENDKEY => ''} 2024-11-20T20:39:55,247 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,247 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:39:55,247 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,247 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,249 INFO [StoreOpener-b0aa728e838aa4bdcbfc7aeb3ad338bd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,251 INFO [StoreOpener-b0aa728e838aa4bdcbfc7aeb3ad338bd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b0aa728e838aa4bdcbfc7aeb3ad338bd columnFamilyName info 2024-11-20T20:39:55,251 DEBUG [StoreOpener-b0aa728e838aa4bdcbfc7aeb3ad338bd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:39:55,252 INFO [StoreOpener-b0aa728e838aa4bdcbfc7aeb3ad338bd-1 {}] regionserver.HStore(327): Store=b0aa728e838aa4bdcbfc7aeb3ad338bd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:39:55,252 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,253 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,254 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,254 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,254 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,256 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,259 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:39:55,260 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b0aa728e838aa4bdcbfc7aeb3ad338bd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799487, jitterRate=0.016601070761680603}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T20:39:55,260 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:39:55,261 DEBUG 
2024-11-20T20:39:55,261 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b0aa728e838aa4bdcbfc7aeb3ad338bd: Running coprocessor pre-open hook at 1732135195247Writing region info on filesystem at 1732135195247Initializing all the Stores at 1732135195249 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135195249Cleaning up temporary data from old regions at 1732135195254 (+5 ms)Running coprocessor post-open hooks at 1732135195260 (+6 ms)Region opened successfully at 1732135195261 (+1 ms)
2024-11-20T20:39:55,262 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., pid=6, masterSystemTime=1732135195241
2024-11-20T20:39:55,265 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:39:55,265 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:39:55,266 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b0aa728e838aa4bdcbfc7aeb3ad338bd, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,39133,1732135193457
2024-11-20T20:39:55,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b0aa728e838aa4bdcbfc7aeb3ad338bd, server=d7d59561a64e,39133,1732135193457 because future has completed
2024-11-20T20:39:55,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[the preceding metric-collection warning repeats 7 more times through 20:39:55,274]
2024-11-20T20:39:55,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-20T20:39:55,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b0aa728e838aa4bdcbfc7aeb3ad338bd, server=d7d59561a64e,39133,1732135193457 in 183 msec
2024-11-20T20:39:55,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-20T20:39:55,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, ASSIGN in 341 msec
2024-11-20T20:39:55,279 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-20T20:39:55,279 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732135195279"}]},"ts":"1732135195279"}
2024-11-20T20:39:55,282 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
2024-11-20T20:39:55,283 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-20T20:39:55,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 386 msec
2024-11-20T20:39:55,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[the preceding metric-collection warning repeats 9 more times through 20:39:55,304]
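The repeated FsDatasetImpl(779) warnings above are a shutdown race in the mini-cluster's DataNode metrics: the Metrics2 poller thread keeps sampling while a teardown path has already cleared the executor map it reads, so every sample trips the same NullPointerException. Below is a minimal, self-contained sketch of that race and of the usual snapshot-then-check mitigation; the class and field names are illustrative stand-ins, not the actual FsDatasetImpl internals.

    import java.util.Collections;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class MetricsGuardDemo {
        // Hypothetical stand-in for the per-volume executor map that a shutdown
        // path nulls out while the metrics thread may still be sampling.
        private volatile Map<String, Object> executors = new ConcurrentHashMap<>();

        public void shutdown() {
            executors = null; // racy publication: the sampler can observe null
        }

        public int sampleUnsafe() {
            // Mirrors the failing pattern: dereference without a null check.
            return executors.values().size(); // NPE once shutdown() has run
        }

        public int sampleSafe() {
            // Read the volatile field once, then check the local snapshot.
            Map<String, Object> snapshot = executors;
            return (snapshot == null ? Collections.<String, Object>emptyMap() : snapshot).values().size();
        }

        public static void main(String[] args) {
            MetricsGuardDemo d = new MetricsGuardDemo();
            d.shutdown();
            System.out.println(d.sampleSafe()); // prints 0 instead of throwing
            // d.sampleUnsafe() here would throw the NPE seen in the log
        }
    }

Reading the field once into a local and checking that snapshot lets the sampler return an empty view instead of throwing when it loses the race with shutdown.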
2024-11-20T20:39:55,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:39:55,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 [identical InvocationTargetException / "Filesystem closed" stack trace as above]
2024-11-20T20:39:55,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta [identical stack trace as above]
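The InvocationTargetException: null header on these traces is an artifact of reflection, not a separate failure: RecoverLeaseFSUtils reaches the isFileClosed check via Method.invoke (see the RecoverLeaseFSUtils.java:254 frame), and when the reflective target throws, Java wraps the real IOException in an InvocationTargetException whose own message is null. A self-contained sketch of the same wrapping behavior, with a hypothetical Probe class standing in for the reflective target:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveWrapDemo {
        // Stand-in for the reflective target; throws once the client is closed.
        public static class Probe {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            Method m = Probe.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new Probe(), "/wal/file");
            } catch (InvocationTargetException e) {
                // The wrapper's own message is null, hence the
                // "InvocationTargetException: null" line in the log.
                System.out.println("wrapper message: " + e.getMessage());
                // The real failure is the wrapped cause.
                System.out.println("cause: " + e.getCause());
            }
        }
    }

So the line to act on in these traces is the Caused by: java.io.IOException: Filesystem closed, which here indicates the test's HDFS client was shut down while the Close-WAL-Writer thread was still retrying lease recovery.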
2024-11-20T20:39:55,809 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T20:39:55,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[the preceding metric-collection warning repeats 17 more times through 20:39:55,849]
2024-11-20T20:39:56,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 [identical InvocationTargetException / "Filesystem closed" stack trace as above]
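The MetricsConfig(138) warning above means the Metrics2 subsystem found neither hadoop-metrics2-datanode.properties nor hadoop-metrics2.properties on the classpath, so it falls back to defaults. If that noise is worth silencing in a test run, a minimal hadoop-metrics2.properties modeled on the stock Hadoop template would do; the sink filename below is an arbitrary example:

    # Minimal hadoop-metrics2.properties (assumed placement: on the DataNode classpath).
    # Keys follow the stock Hadoop metrics2 template.
    *.period=10
    *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
    datanode.sink.file.filename=datanode-metrics.out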
2024-11-20T20:39:56,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 [identical stack trace as above]
2024-11-20T20:39:56,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-11-20T20:39:56,486 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-11-20T20:39:56,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T20:39:56,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 [identical stack trace as above]
2024-11-20T20:39:56,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta [identical stack trace as above]
[2024-11-20T20:39:57,049 through 2024-11-20T20:40:00,051: the RecoverLeaseFSUtils(258) "Failed invocation" warning repeats at roughly one-second intervals for the same four WAL files (d7d59561a64e%2C38137%2C1732135042252.1732135042557 under MasterData, d7d59561a64e%2C39199%2C1732135042427.1732135043207, d7d59561a64e%2C35431%2C1732135043885.1732135044133, and d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta), 13 occurrences in total, each with the identical InvocationTargetException / "Filesystem closed" stack trace]
2024-11-20T20:40:00,169 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-20T20:40:00,170 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
[2024-11-20T20:40:00,383, 20:40:00,528 and 20:40:00,545: three further occurrences of the same warning and stack trace, for d7d59561a64e%2C39199%2C1732135042427.1732135043207, d7d59561a64e%2C35431%2C1732135043885.1732135044133 and the .meta WAL respectively]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:00,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T20:40:00,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 20:40:00,051 entry above]
2024-11-20T20:40:00,751 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T20:40:00,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[the FsDatasetImpl warning above repeats in quick succession through 20:40:00,794]
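The repeated metric-collection warnings are a shutdown race on the datanode side: the Metrics2 poller keeps sampling an FsDatasetImpl gauge after the dataset's internal executor map has been torn down, so each sample dies with the null-check message above, which the collector catches and logs rather than crashing. A toy model of that failure mode (hypothetical class and method names, not the Hadoop source):

import java.util.Map;
import java.util.concurrent.ExecutorService;

// Toy model of a metrics gauge racing shutdown: once shutdown() nulls the
// field, every poll throws the NPE seen in the log, and the poller
// downgrades it to a WARN instead of dying.
final class DatasetMetrics {
  private Map<String, ExecutorService> executors = Map.of();

  void shutdown() { executors = null; }

  int pendingAsyncOps() {
    // After shutdown() this fails with: Cannot invoke
    // "java.util.Map.values()" because "this.executors" is null
    return executors.values().size();
  }

  Integer pollSafely() {
    try {
      return pendingAsyncOps();
    } catch (Exception e) {
      // Mirrors the log line format above.
      System.err.println("Exception thrown while metric collection. Exception : " + e.getMessage());
      return null;
    }
  }
}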
[The Close-WAL-Writer-0 lease-recovery warning repeats roughly once per second for each of the four WAL files (the MasterData WAL, the two region-server WALs and the meta WAL) from 20:40:01,051 through 20:40:04,547, each time with the identical InvocationTargetException / "Filesystem closed" stack trace.]
2024-11-20T20:40:04,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T20:40:04,966 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-20T20:40:04,967 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-20T20:40:04,970 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-20T20:40:04,970 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
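At this point the test has finished creating TestLogRolling-testLogRolling on the second mini-cluster (ports 43361/39133) and starts writing ~1 KB cells that drive the memstore flushes below. A hedged sketch of the client-side calls that would produce this sequence (table and family names taken from the log; row count and value size are illustrative, and this is not the actual TestLogRolling source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the client side of this log: create the table, then write
// ~1 KB values ("is 1080 ... key is row0001/info:...") until the
// memstore crosses its flush threshold and HRegion logs "Flushing ...".
public class LogRollingDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build());
      try (Table table = conn.getTable(tn)) {
        byte[] value = new byte[1024]; // ~1 KB cells, as in the log
        for (int i = 1; i <= 25; i++) {
          Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value);
          table.put(put);
        }
      }
    }
  }
}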
2024-11-20T20:40:04,974 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., hostname=d7d59561a64e,39133,1732135193457, seqNum=2]
2024-11-20T20:40:04,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:04,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T20:40:05,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/cbdc71e94be9477585060167ab89cf52 is 1080, key is row0001/info:/1732135204975/Put/seqid=0
2024-11-20T20:40:05,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741837_1013 (size=12509)
2024-11-20T20:40:05,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741837_1013 (size=12509)
2024-11-20T20:40:05,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/cbdc71e94be9477585060167ab89cf52
2024-11-20T20:40:05,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/cbdc71e94be9477585060167ab89cf52 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/cbdc71e94be9477585060167ab89cf52
2024-11-20T20:40:05,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/cbdc71e94be9477585060167ab89cf52, entries=7, sequenceid=11, filesize=12.2 K
2024-11-20T20:40:05,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 37ms, sequenceid=11, compaction requested=false
2024-11-20T20:40:05,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:05,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:05,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-11-20T20:40:05,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/a5f29ecace024b92b4d53b4a831bd4ce is 1080, key is row0008/info:/1732135204991/Put/seqid=0
2024-11-20T20:40:05,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741838_1014 (size=24376)
2024-11-20T20:40:05,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741838_1014 (size=24376)
2024-11-20T20:40:05,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/a5f29ecace024b92b4d53b4a831bd4ce
2024-11-20T20:40:05,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/a5f29ecace024b92b4d53b4a831bd4ce as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce
2024-11-20T20:40:05,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce, entries=18, sequenceid=32, filesize=23.8 K
2024-11-20T20:40:05,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 20:40:00,051 entry above]
2024-11-20T20:40:05,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=7.36 KB/7532 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 26ms, sequenceid=32, compaction requested=false
2024-11-20T20:40:05,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:05,055 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.0 K, sizeToCheck=16.0 K
2024-11-20T20:40:05,055 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:05,055 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce because midkey is the same as first or last row
2024-11-20T20:40:05,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 20:40:00,051 entry above]
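The split-policy DEBUG lines above capture a two-stage decision: the size check passes (sumSize=36.0 K exceeds sizeToCheck=16.0 K), but no split actually happens because the HFile's midkey equals its first or last row, so there is no usable split point. A condensed model of that logic (assumed helper signatures, not the HBase source):

import java.util.Arrays;
import java.util.Optional;

// Condensed model of the split decision in the log: size says "split",
// but an unusable midkey (equal to the file's first or last key) vetoes it.
final class SplitDecision {
  static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
    // "Should split because region size is big enough sumSize=36.0 K, sizeToCheck=16.0 K"
    return sumStoreSizeBytes > sizeToCheckBytes;
  }

  static Optional<byte[]> splitPoint(byte[] midkey, byte[] firstKey, byte[] lastKey) {
    // "cannot split ... because midkey is the same as first or last row"
    if (Arrays.equals(midkey, firstKey) || Arrays.equals(midkey, lastKey)) {
      return Optional.empty();
    }
    return Optional.of(midkey);
  }
}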
[The Close-WAL-Writer-0 lease-recovery warnings for the same WAL files continue with identical stack traces at 20:40:05,532, 20:40:05,548, 20:40:06,054, 20:40:06,397 and 20:40:06,532.]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:06,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:07,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:40:07,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-20T20:40:07,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/c79eb27a22494166b3fa1add715a9e0a is 1080, key is row0026/info:/1732135205030/Put/seqid=0 2024-11-20T20:40:07,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
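The repeated "Failed invocation" WARNs above all bottom out in the same place: RecoverLeaseFSUtils calls DistributedFileSystem.isFileClosed through reflection while the test's HDFS client has already been shut down, so DFSClient.checkOpen throws IOException("Filesystem closed") and reflection re-wraps it as the InvocationTargetException that gets logged before the next retry. A minimal sketch of that probe pattern (illustrative only, not the HBase source; the class name and return convention here are assumptions):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper mirroring the call chain in the stack traces above.
final class IsFileClosedProbe {
  // Returns the result of DistributedFileSystem.isFileClosed, or null when
  // the probe itself fails (e.g. "Filesystem closed" during test shutdown).
  static Boolean probe(FileSystem fs, Path wal) {
    try {
      // isFileClosed(Path) exists only on DistributedFileSystem, hence the
      // reflective lookup instead of a compile-time call.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, wal);
    } catch (InvocationTargetException e) {
      // The IOException("Filesystem closed") from DFSClient.checkOpen arrives
      // here wrapped, which is what the WARN lines record before retrying.
      return null;
    } catch (ReflectiveOperationException e) {
      return null; // method not present on this FileSystem implementation
    }
  }
}

The ~1-second spacing of the WARN timestamps (20:40:05, 20:40:06, ...) for the same four WAL paths is consistent with recoverDFSFileLease polling this probe in a retry loop.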
2024-11-20T20:40:07,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741839_1015 (size=13586)
2024-11-20T20:40:07,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741839_1015 (size=13586)
2024-11-20T20:40:07,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/c79eb27a22494166b3fa1add715a9e0a
2024-11-20T20:40:07,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/c79eb27a22494166b3fa1add715a9e0a as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/c79eb27a22494166b3fa1add715a9e0a
2024-11-20T20:40:07,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/c79eb27a22494166b3fa1add715a9e0a, entries=8, sequenceid=43, filesize=13.3 K
2024-11-20T20:40:07,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 25ms, sequenceid=43, compaction requested=true
2024-11-20T20:40:07,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:07,073 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,073 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,074 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce because midkey is the same as first or last row
2024-11-20T20:40:07,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b0aa728e838aa4bdcbfc7aeb3ad338bd:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:40:07,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:07,074 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:07,075 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:40:07,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:07,075 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): b0aa728e838aa4bdcbfc7aeb3ad338bd/info is initiating minor compaction (all files)
2024-11-20T20:40:07,075 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b0aa728e838aa4bdcbfc7aeb3ad338bd/info in TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:40:07,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-20T20:40:07,076 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/cbdc71e94be9477585060167ab89cf52, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/c79eb27a22494166b3fa1add715a9e0a] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp, totalSize=49.3 K
2024-11-20T20:40:07,076 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting cbdc71e94be9477585060167ab89cf52, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732135204975
2024-11-20T20:40:07,077 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5f29ecace024b92b4d53b4a831bd4ce, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1732135204991
2024-11-20T20:40:07,077 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting c79eb27a22494166b3fa1add715a9e0a, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732135205030
2024-11-20T20:40:07,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/d5568e6ce4734418a6cb66ca7fa34a49 is 1080, key is row0034/info:/1732135207050/Put/seqid=0
2024-11-20T20:40:07,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741840_1016 (size=16817)
2024-11-20T20:40:07,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741840_1016 (size=16817)
2024-11-20T20:40:07,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/d5568e6ce4734418a6cb66ca7fa34a49
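The ExploringCompactionPolicy entries above ("selected 3 files of size 50471 ... with 1 in ratio") reflect a size-ratio test over each candidate set; the 50471-byte total matches the three Compactor inputs (12.2 K + 23.8 K + 13.3 K = 49.3 K). A simplified sketch of that check, assuming the usual default semantics (no single file may exceed the configured ratio times the rest of the selection combined); treat the ratio value and method shape as assumptions, not the actual HBase code:

// Simplified "files in ratio" test, modeled on the check that
// ExploringCompactionPolicy applies to each candidate permutation.
final class CompactionRatioCheck {
  static boolean filesInRatio(long[] fileSizes, double ratio) {
    long total = 0;
    for (long size : fileSizes) total += size;
    for (long size : fileSizes) {
      // A single oversized file disqualifies the whole selection.
      if (size > (total - size) * ratio) return false;
    }
    return true;
  }
}

For the selection above, the three files total 50471 bytes and the largest (~23.8 K) is well under 1.2x the other two combined, so the candidate passes and is logged as "1 in ratio".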
2024-11-20T20:40:07,091 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b0aa728e838aa4bdcbfc7aeb3ad338bd#info#compaction#59 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:40:07,091 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/39e76483cc1f467d8c830e43a68c5ef4 is 1080, key is row0001/info:/1732135204975/Put/seqid=0
2024-11-20T20:40:07,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/d5568e6ce4734418a6cb66ca7fa34a49 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d5568e6ce4734418a6cb66ca7fa34a49
2024-11-20T20:40:07,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741841_1017 (size=40670)
2024-11-20T20:40:07,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741841_1017 (size=40670)
2024-11-20T20:40:07,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d5568e6ce4734418a6cb66ca7fa34a49, entries=11, sequenceid=57, filesize=16.4 K
2024-11-20T20:40:07,103 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 27ms, sequenceid=57, compaction requested=false
2024-11-20T20:40:07,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:07,103 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,103 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,103 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce because midkey is the same as first or last row
2024-11-20T20:40:07,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:07,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-20T20:40:07,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/b804052ce162458ea4012207fea283ab is 1080, key is row0045/info:/1732135207077/Put/seqid=0
2024-11-20T20:40:07,111 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/39e76483cc1f467d8c830e43a68c5ef4 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4
2024-11-20T20:40:07,119 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b0aa728e838aa4bdcbfc7aeb3ad338bd/info of b0aa728e838aa4bdcbfc7aeb3ad338bd into 39e76483cc1f467d8c830e43a68c5ef4(size=39.7 K), total size for store is 56.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:07,119 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., storeName=b0aa728e838aa4bdcbfc7aeb3ad338bd/info, priority=13, startTime=1732135207074; duration=0sec
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4 because midkey is the same as first or last row
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4 because midkey is the same as first or last row
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,119 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,120 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4 because midkey is the same as first or last row
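The recurring "cannot split ... because midkey is the same as first or last row" DEBUG lines encode a simple guard: the proposed split point (the largest store file's midkey) must differ from both the file's first and last row keys, otherwise one daughter region would be empty. A sketch of that comparison (an assumed distillation, not the actual StoreUtils code):

import java.util.Arrays;

// Hypothetical shape of the guard behind the StoreUtils message: a midkey
// equal to the first or last row cannot be used as a split point.
final class SplitPointGuard {
  static boolean usableSplitPoint(byte[] firstRow, byte[] midRow, byte[] lastRow) {
    return !Arrays.equals(midRow, firstRow) && !Arrays.equals(midRow, lastRow);
  }
}

With only a few dozen rows written so far (row0001 onward), the midkey plausibly keeps colliding with a boundary row, which would explain why the region stays unsplit even though sumSize is well past sizeToCheck.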
2024-11-20T20:40:07,120 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:07,120 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b0aa728e838aa4bdcbfc7aeb3ad338bd:info
2024-11-20T20:40:07,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741842_1018 (size=16817)
2024-11-20T20:40:07,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741842_1018 (size=16817)
2024-11-20T20:40:07,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=71 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/b804052ce162458ea4012207fea283ab
2024-11-20T20:40:07,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/b804052ce162458ea4012207fea283ab as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/b804052ce162458ea4012207fea283ab
2024-11-20T20:40:07,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/b804052ce162458ea4012207fea283ab, entries=11, sequenceid=71, filesize=16.4 K
2024-11-20T20:40:07,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 41ms, sequenceid=71, compaction requested=true
2024-11-20T20:40:07,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:07,145 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,145 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,145 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4 because midkey is the same as first or last row
2024-11-20T20:40:07,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b0aa728e838aa4bdcbfc7aeb3ad338bd:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:40:07,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:07,145 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:07,147 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74304 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:40:07,147 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): b0aa728e838aa4bdcbfc7aeb3ad338bd/info is initiating minor compaction (all files)
2024-11-20T20:40:07,147 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b0aa728e838aa4bdcbfc7aeb3ad338bd/info in TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:40:07,147 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d5568e6ce4734418a6cb66ca7fa34a49, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/b804052ce162458ea4012207fea283ab] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp, totalSize=72.6 K
2024-11-20T20:40:07,148 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 39e76483cc1f467d8c830e43a68c5ef4, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732135204975
2024-11-20T20:40:07,148 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5568e6ce4734418a6cb66ca7fa34a49, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732135207050
2024-11-20T20:40:07,148 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting b804052ce162458ea4012207fea283ab, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1732135207077
2024-11-20T20:40:07,162 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b0aa728e838aa4bdcbfc7aeb3ad338bd#info#compaction#61 average throughput is 28.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:40:07,163 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/d8f87b1edc114c42b028453bed8ac56d is 1080, key is row0001/info:/1732135204975/Put/seqid=0
2024-11-20T20:40:07,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741843_1019 (size=64535)
2024-11-20T20:40:07,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741843_1019 (size=64535)
2024-11-20T20:40:07,174 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/d8f87b1edc114c42b028453bed8ac56d as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d
2024-11-20T20:40:07,181 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b0aa728e838aa4bdcbfc7aeb3ad338bd/info of b0aa728e838aa4bdcbfc7aeb3ad338bd into d8f87b1edc114c42b028453bed8ac56d(size=63.0 K), total size for store is 63.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:40:07,181 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:07,181 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., storeName=b0aa728e838aa4bdcbfc7aeb3ad338bd/info, priority=13, startTime=1732135207145; duration=0sec
2024-11-20T20:40:07,181 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,181 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,181 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d because midkey is the same as first or last row
2024-11-20T20:40:07,181 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,181 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,181 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d because midkey is the same as first or last row
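Each flush and compaction above re-evaluates the split policies, and the ConstantSizeRegionSplitPolicy lines show the arithmetic directly: store file sizes are summed and compared against sizeToCheck (16.0 K in this test run). A minimal sketch of that decision under those assumptions (not the HBase source):

// Sketch of the size test reported as "Should split because region size is
// big enough sumSize=..., sizeToCheck=...". For the entries above:
// sumSize 63.0 K vs. sizeToCheck 16.0 K, so the policy votes to split.
final class ConstantSizeSplitCheck {
  static boolean shouldSplit(long[] storeSizesBytes, long sizeToCheckBytes) {
    long sum = 0;
    for (long size : storeSizesBytes) sum += size;
    return sum > sizeToCheckBytes;
  }
}

The policy's yes vote is then vetoed by the midkey guard shown earlier, which is why the same pair of messages keeps repeating without an actual split.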
2024-11-20T20:40:07,182 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K
2024-11-20T20:40:07,182 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:07,182 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d because midkey is the same as first or last row
2024-11-20T20:40:07,182 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:07,182 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b0aa728e838aa4bdcbfc7aeb3ad338bd:info
2024-11-20T20:40:07,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:07,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:07,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:08,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:08,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:08,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:08,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:09,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:09,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-11-20T20:40:09,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/5aa6527200d943abbe13c32236aa3211 is 1080, key is row0056/info:/1732135207106/Put/seqid=0
2024-11-20T20:40:09,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741844_1020 (size=15740)
2024-11-20T20:40:09,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741844_1020 (size=15740)
2024-11-20T20:40:09,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/5aa6527200d943abbe13c32236aa3211
2024-11-20T20:40:09,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/5aa6527200d943abbe13c32236aa3211 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5aa6527200d943abbe13c32236aa3211
2024-11-20T20:40:09,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5aa6527200d943abbe13c32236aa3211, entries=10, sequenceid=86, filesize=15.4 K
2024-11-20T20:40:09,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 26ms, sequenceid=86, compaction requested=false
2024-11-20T20:40:09,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:09,157 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K
2024-11-20T20:40:09,157 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:09,157 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d because midkey is the same as first or last row
2024-11-20T20:40:09,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,159
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-20T20:40:09,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/de9f864c6139482b8188ed89e9a67319 is 1080, key is row0066/info:/1732135209134/Put/seqid=0 2024-11-20T20:40:09,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741845_1021 (size=17894) 2024-11-20T20:40:09,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741845_1021 (size=17894) 2024-11-20T20:40:09,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/de9f864c6139482b8188ed89e9a67319 2024-11-20T20:40:09,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/de9f864c6139482b8188ed89e9a67319 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/de9f864c6139482b8188ed89e9a67319 2024-11-20T20:40:09,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/de9f864c6139482b8188ed89e9a67319, entries=12, sequenceid=101, filesize=17.5 K 2024-11-20T20:40:09,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 24ms, sequenceid=101, compaction requested=true 2024-11-20T20:40:09,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd: 2024-11-20T20:40:09,183 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.9 K, sizeToCheck=16.0 K 2024-11-20T20:40:09,183 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T20:40:09,183 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d because midkey is the same as first or last row 2024-11-20T20:40:09,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on b0aa728e838aa4bdcbfc7aeb3ad338bd 2024-11-20T20:40:09,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b0aa728e838aa4bdcbfc7aeb3ad338bd:info, priority=-2147483648, current under compaction store size is 1 
2024-11-20T20:40:09,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:09,183 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:09,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-20T20:40:09,184 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 98169 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:40:09,184 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): b0aa728e838aa4bdcbfc7aeb3ad338bd/info is initiating minor compaction (all files)
2024-11-20T20:40:09,185 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b0aa728e838aa4bdcbfc7aeb3ad338bd/info in TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:40:09,185 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5aa6527200d943abbe13c32236aa3211, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/de9f864c6139482b8188ed89e9a67319] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp, totalSize=95.9 K
2024-11-20T20:40:09,185 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting d8f87b1edc114c42b028453bed8ac56d, keycount=55, bloomtype=ROW, size=63.0 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1732135204975
2024-11-20T20:40:09,186 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5aa6527200d943abbe13c32236aa3211, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732135207106
2024-11-20T20:40:09,186 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting de9f864c6139482b8188ed89e9a67319, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732135209134
2024-11-20T20:40:09,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/5a77e4f1e96a4f8f9f9e0ce0a7cee28b is 1080, key is row0078/info:/1732135209160/Put/seqid=0
2024-11-20T20:40:09,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741846_1022 (size=17894)
2024-11-20T20:40:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741846_1022 (size=17894)
2024-11-20T20:40:09,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/5a77e4f1e96a4f8f9f9e0ce0a7cee28b
2024-11-20T20:40:09,200 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b0aa728e838aa4bdcbfc7aeb3ad338bd#info#compaction#65 average throughput is 26.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:40:09,201 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/519ce62031d64053a936145f91240220 is 1080, key is row0001/info:/1732135204975/Put/seqid=0
2024-11-20T20:40:09,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/5a77e4f1e96a4f8f9f9e0ce0a7cee28b as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5a77e4f1e96a4f8f9f9e0ce0a7cee28b
2024-11-20T20:40:09,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741847_1023 (size=88408)
2024-11-20T20:40:09,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741847_1023 (size=88408)
2024-11-20T20:40:09,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5a77e4f1e96a4f8f9f9e0ce0a7cee28b, entries=12, sequenceid=116, filesize=17.5 K
2024-11-20T20:40:09,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 23ms, sequenceid=116, compaction requested=false
2024-11-20T20:40:09,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.3 K, sizeToCheck=16.0 K
2024-11-20T20:40:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:09,207 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d because midkey is the same as first or last row
2024-11-20T20:40:09,210 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/519ce62031d64053a936145f91240220 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220
2024-11-20T20:40:09,216 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b0aa728e838aa4bdcbfc7aeb3ad338bd/info of b0aa728e838aa4bdcbfc7aeb3ad338bd into 519ce62031d64053a936145f91240220(size=86.3 K), total size for store is 103.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:40:09,216 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b0aa728e838aa4bdcbfc7aeb3ad338bd:
2024-11-20T20:40:09,216 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., storeName=b0aa728e838aa4bdcbfc7aeb3ad338bd/info, priority=13, startTime=1732135209183; duration=0sec
2024-11-20T20:40:09,216 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K
2024-11-20T20:40:09,216 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:09,217 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K
2024-11-20T20:40:09,217 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:09,217 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K
2024-11-20T20:40:09,217 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T20:40:09,218 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:09,218 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:09,218 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b0aa728e838aa4bdcbfc7aeb3ad338bd:info
2024-11-20T20:40:09,219 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41021 {}] assignment.AssignmentManager(1355): Split request from d7d59561a64e,39133,1732135193457, parent={ENCODED => b0aa728e838aa4bdcbfc7aeb3ad338bd, NAME => 'TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-11-20T20:40:09,223 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41021 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=d7d59561a64e,39133,1732135193457
2024-11-20T20:40:09,226 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41021 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b0aa728e838aa4bdcbfc7aeb3ad338bd, daughterA=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, daughterB=4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,227 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b0aa728e838aa4bdcbfc7aeb3ad338bd, daughterA=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, daughterB=4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,227 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b0aa728e838aa4bdcbfc7aeb3ad338bd, daughterA=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, daughterB=4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,227 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b0aa728e838aa4bdcbfc7aeb3ad338bd, daughterA=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, daughterB=4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, UNASSIGN}]
2024-11-20T20:40:09,235 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, UNASSIGN
2024-11-20T20:40:09,237 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=b0aa728e838aa4bdcbfc7aeb3ad338bd, regionState=CLOSING, regionLocation=d7d59561a64e,39133,1732135193457
2024-11-20T20:40:09,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, UNASSIGN because future has completed
2024-11-20T20:40:09,240 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-11-20T20:40:09,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure b0aa728e838aa4bdcbfc7aeb3ad338bd, server=d7d59561a64e,39133,1732135193457}]
2024-11-20T20:40:09,397 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,397 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true
2024-11-20T20:40:09,398 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing b0aa728e838aa4bdcbfc7aeb3ad338bd, disabling compactions & flushes
2024-11-20T20:40:09,398 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:40:09,398 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:40:09,398 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd. after waiting 0 ms
2024-11-20T20:40:09,398 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:40:09,398 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T20:40:09,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:09,404 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/4d4020e57ee8492c8d332e515d2bed11 is 1080, key is row0090/info:/1732135209184/Put/seqid=0
2024-11-20T20:40:09,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741848_1024 (size=12509)
2024-11-20T20:40:09,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741848_1024 (size=12509)
2024-11-20T20:40:09,411 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/4d4020e57ee8492c8d332e515d2bed11
2024-11-20T20:40:09,418 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/.tmp/info/4d4020e57ee8492c8d332e515d2bed11 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/4d4020e57ee8492c8d332e515d2bed11
2024-11-20T20:40:09,423 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/4d4020e57ee8492c8d332e515d2bed11, entries=7, sequenceid=127, filesize=12.2 K
2024-11-20T20:40:09,424 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 26ms, sequenceid=127, compaction requested=true
2024-11-20T20:40:09,425 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/cbdc71e94be9477585060167ab89cf52, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/c79eb27a22494166b3fa1add715a9e0a, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d5568e6ce4734418a6cb66ca7fa34a49, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/b804052ce162458ea4012207fea283ab, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5aa6527200d943abbe13c32236aa3211, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/de9f864c6139482b8188ed89e9a67319] to archive
2024-11-20T20:40:09,426 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-20T20:40:09,428 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/cbdc71e94be9477585060167ab89cf52 to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/cbdc71e94be9477585060167ab89cf52
2024-11-20T20:40:09,429 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/a5f29ecace024b92b4d53b4a831bd4ce
2024-11-20T20:40:09,431 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4 to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/39e76483cc1f467d8c830e43a68c5ef4
2024-11-20T20:40:09,432 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/c79eb27a22494166b3fa1add715a9e0a to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/c79eb27a22494166b3fa1add715a9e0a
2024-11-20T20:40:09,433 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d5568e6ce4734418a6cb66ca7fa34a49 to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d5568e6ce4734418a6cb66ca7fa34a49
2024-11-20T20:40:09,434 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/d8f87b1edc114c42b028453bed8ac56d
2024-11-20T20:40:09,435 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/b804052ce162458ea4012207fea283ab to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/b804052ce162458ea4012207fea283ab
2024-11-20T20:40:09,436 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5aa6527200d943abbe13c32236aa3211 to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5aa6527200d943abbe13c32236aa3211
2024-11-20T20:40:09,438 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/de9f864c6139482b8188ed89e9a67319 to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/de9f864c6139482b8188ed89e9a67319
2024-11-20T20:40:09,445 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1
2024-11-20T20:40:09,446 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.
2024-11-20T20:40:09,446 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for b0aa728e838aa4bdcbfc7aeb3ad338bd: Waiting for close lock at 1732135209398Running coprocessor pre-close hooks at 1732135209398Disabling compacts and flushes for region at 1732135209398Disabling writes for close at 1732135209398Obtaining lock to block concurrent updates at 1732135209398Preparing flush snapshotting stores in b0aa728e838aa4bdcbfc7aeb3ad338bd at 1732135209398Finished memstore snapshotting TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., syncing WAL and waiting on mvcc, flushsize=dataSize=7532, getHeapSize=8304, getOffHeapSize=0, getCellsCount=7 at 1732135209398Flushing stores of TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd. at 1732135209399 (+1 ms)Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd/info: creating writer at 1732135209399Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd/info: appending metadata at 1732135209404 (+5 ms)Flushing b0aa728e838aa4bdcbfc7aeb3ad338bd/info: closing flushed file at 1732135209404Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d184bc4: reopening flushed file at 1732135209417 (+13 ms)Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for b0aa728e838aa4bdcbfc7aeb3ad338bd in 26ms, sequenceid=127, compaction requested=true at 1732135209424 (+7 ms)Writing region close event to WAL at 1732135209441 (+17 ms)Running coprocessor post-close hooks at 1732135209446 (+5 ms)Closed at 1732135209446
2024-11-20T20:40:09,449 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,450 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=b0aa728e838aa4bdcbfc7aeb3ad338bd, regionState=CLOSED
2024-11-20T20:40:09,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure b0aa728e838aa4bdcbfc7aeb3ad338bd, server=d7d59561a64e,39133,1732135193457 because future has completed
2024-11-20T20:40:09,457 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8
2024-11-20T20:40:09,457 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure b0aa728e838aa4bdcbfc7aeb3ad338bd, server=d7d59561a64e,39133,1732135193457 in 214 msec
2024-11-20T20:40:09,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-20T20:40:09,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, UNASSIGN in 223 msec
2024-11-20T20:40:09,473 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:09,477 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=b0aa728e838aa4bdcbfc7aeb3ad338bd, threads=3
2024-11-20T20:40:09,480 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5a77e4f1e96a4f8f9f9e0ce0a7cee28b for region: b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,480 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/4d4020e57ee8492c8d332e515d2bed11 for region: b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,480 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220 for region: b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,490 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/4d4020e57ee8492c8d332e515d2bed11, top=true
2024-11-20T20:40:09,490 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5a77e4f1e96a4f8f9f9e0ce0a7cee28b, top=true
2024-11-20T20:40:09,501 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-5a77e4f1e96a4f8f9f9e0ce0a7cee28b for child: 4b96a5b3a18a8b5af1cb54b54643d45a, parent: b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,501 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-4d4020e57ee8492c8d332e515d2bed11 for child: 4b96a5b3a18a8b5af1cb54b54643d45a, parent: b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,501 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/5a77e4f1e96a4f8f9f9e0ce0a7cee28b for region: b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,501 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/4d4020e57ee8492c8d332e515d2bed11 for region: b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741849_1025 (size=27)
2024-11-20T20:40:09,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741849_1025 (size=27)
2024-11-20T20:40:09,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741850_1026 (size=27)
2024-11-20T20:40:09,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741850_1026 (size=27)
2024-11-20T20:40:09,511 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220 for region: b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:09,514 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region b0aa728e838aa4bdcbfc7aeb3ad338bd Daughter A: [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd] storefiles, Daughter B: [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-4d4020e57ee8492c8d332e515d2bed11, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-5a77e4f1e96a4f8f9f9e0ce0a7cee28b] storefiles.
2024-11-20T20:40:09,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741851_1027 (size=71)
2024-11-20T20:40:09,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741851_1027 (size=71)
2024-11-20T20:40:09,523 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:09,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741852_1028 (size=71)
2024-11-20T20:40:09,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741852_1028 (size=71)
2024-11-20T20:40:09,534 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:09,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:09,545 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1
2024-11-20T20:40:09,547 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1
2024-11-20T20:40:09,550 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732135209549"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732135209549"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732135209549"}]},"ts":"1732135209549"}
2024-11-20T20:40:09,550 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732135209549"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732135209549"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732135209549"}]},"ts":"1732135209549"}
2024-11-20T20:40:09,550 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732135209549"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732135209549"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732135209549"}]},"ts":"1732135209549"}
2024-11-20T20:40:09,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:09,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4b96a5b3a18a8b5af1cb54b54643d45a, ASSIGN}]
2024-11-20T20:40:09,570 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, ASSIGN
2024-11-20T20:40:09,570 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4b96a5b3a18a8b5af1cb54b54643d45a, ASSIGN
2024-11-20T20:40:09,571 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4b96a5b3a18a8b5af1cb54b54643d45a, ASSIGN; state=SPLITTING_NEW, location=d7d59561a64e,39133,1732135193457; forceNewPlan=false, retain=false
2024-11-20T20:40:09,571 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, ASSIGN; state=SPLITTING_NEW, location=d7d59561a64e,39133,1732135193457; forceNewPlan=false, retain=false
2024-11-20T20:40:09,722 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=4b96a5b3a18a8b5af1cb54b54643d45a, regionState=OPENING, regionLocation=d7d59561a64e,39133,1732135193457
2024-11-20T20:40:09,722 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, regionState=OPENING, regionLocation=d7d59561a64e,39133,1732135193457
2024-11-20T20:40:09,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, ASSIGN because future has completed
2024-11-20T20:40:09,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure cbb091bbe0cb6e3e9160a6fb0a0a4f3e, server=d7d59561a64e,39133,1732135193457}]
2024-11-20T20:40:09,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4b96a5b3a18a8b5af1cb54b54643d45a, ASSIGN because future has completed
2024-11-20T20:40:09,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4b96a5b3a18a8b5af1cb54b54643d45a, server=d7d59561a64e,39133,1732135193457}]
2024-11-20T20:40:09,880 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:09,881 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 4b96a5b3a18a8b5af1cb54b54643d45a, NAME => 'TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.', STARTKEY => 'row0062', ENDKEY => ''}
2024-11-20T20:40:09,881 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,881 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:40:09,881 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,881 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,882 INFO [StoreOpener-4b96a5b3a18a8b5af1cb54b54643d45a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,883 INFO [StoreOpener-4b96a5b3a18a8b5af1cb54b54643d45a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4b96a5b3a18a8b5af1cb54b54643d45a columnFamilyName info
2024-11-20T20:40:09,883 DEBUG [StoreOpener-4b96a5b3a18a8b5af1cb54b54643d45a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:09,893 DEBUG [StoreOpener-4b96a5b3a18a8b5af1cb54b54643d45a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd->hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220-top
2024-11-20T20:40:09,898 DEBUG [StoreOpener-4b96a5b3a18a8b5af1cb54b54643d45a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-4d4020e57ee8492c8d332e515d2bed11
2024-11-20T20:40:09,902 DEBUG [StoreOpener-4b96a5b3a18a8b5af1cb54b54643d45a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-5a77e4f1e96a4f8f9f9e0ce0a7cee28b
2024-11-20T20:40:09,902 INFO [StoreOpener-4b96a5b3a18a8b5af1cb54b54643d45a-1 {}] regionserver.HStore(327): Store=4b96a5b3a18a8b5af1cb54b54643d45a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T20:40:09,902 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,903 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,904 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,904 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,904 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,906 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,906 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 4b96a5b3a18a8b5af1cb54b54643d45a; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693069, jitterRate=-0.11871720850467682}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-20T20:40:09,907 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:09,907 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 4b96a5b3a18a8b5af1cb54b54643d45a: Running coprocessor pre-open hook at 1732135209881Writing region info on filesystem at 1732135209881Initializing all the Stores at 1732135209882 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135209882Cleaning up temporary data from old regions at 1732135209904 (+22 ms)Running coprocessor post-open hooks at 1732135209907 (+3 ms)Region opened successfully at 1732135209907
2024-11-20T20:40:10,039 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., pid=13, masterSystemTime=1732135209877
2024-11-20T20:40:10,039 INFO [FSHLog-0-hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d-prefix:d7d59561a64e,39133,1732135193457 {}] wal.AbstractFSWAL(1368): Slow sync cost: 131 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45765,DS-42a1dab6-edd1-4c46-b8f0-2f78920a7878,DISK], DatanodeInfoWithStorage[127.0.0.1:40355,DS-1a0236ec-534c-4cca-8a45-a37b1e3677ce,DISK]]
2024-11-20T20:40:10,039 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 4b96a5b3a18a8b5af1cb54b54643d45a:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:40:10,039 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:10,039 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:10,043 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:10,043 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): 4b96a5b3a18a8b5af1cb54b54643d45a/info is initiating minor compaction (all files) 2024-11-20T20:40:10,043 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4b96a5b3a18a8b5af1cb54b54643d45a/info in TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a. 2024-11-20T20:40:10,044 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd->hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220-top, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-5a77e4f1e96a4f8f9f9e0ce0a7cee28b, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-4d4020e57ee8492c8d332e515d2bed11] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp, totalSize=116.0 K 2024-11-20T20:40:10,045 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a. 2024-11-20T20:40:10,045 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a. 2024-11-20T20:40:10,046 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e. 
2024-11-20T20:40:10,046 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732135204975 2024-11-20T20:40:10,046 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => cbb091bbe0cb6e3e9160a6fb0a0a4f3e, NAME => 'TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-20T20:40:10,047 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,047 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:40:10,047 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,047 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-5a77e4f1e96a4f8f9f9e0ce0a7cee28b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732135209160 2024-11-20T20:40:10,047 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,047 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=4b96a5b3a18a8b5af1cb54b54643d45a, regionState=OPEN, openSeqNum=131, regionLocation=d7d59561a64e,39133,1732135193457 2024-11-20T20:40:10,047 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-4d4020e57ee8492c8d332e515d2bed11, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732135209184 2024-11-20T20:40:10,048 INFO [StoreOpener-cbb091bbe0cb6e3e9160a6fb0a0a4f3e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,049 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-20T20:40:10,049 INFO [StoreOpener-cbb091bbe0cb6e3e9160a6fb0a0a4f3e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction 
policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cbb091bbe0cb6e3e9160a6fb0a0a4f3e columnFamilyName info 2024-11-20T20:40:10,049 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-20T20:40:10,049 DEBUG [StoreOpener-cbb091bbe0cb6e3e9160a6fb0a0a4f3e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:40:10,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-20T20:40:10,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4b96a5b3a18a8b5af1cb54b54643d45a, server=d7d59561a64e,39133,1732135193457 because future has completed 2024-11-20T20:40:10,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-20T20:40:10,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 4b96a5b3a18a8b5af1cb54b54643d45a, server=d7d59561a64e,39133,1732135193457 in 325 msec 2024-11-20T20:40:10,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4b96a5b3a18a8b5af1cb54b54643d45a, ASSIGN in 485 msec 2024-11-20T20:40:10,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:10,061 DEBUG [StoreOpener-cbb091bbe0cb6e3e9160a6fb0a0a4f3e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd->hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220-bottom 2024-11-20T20:40:10,061 INFO [StoreOpener-cbb091bbe0cb6e3e9160a6fb0a0a4f3e-1 {}] regionserver.HStore(327): Store=cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:40:10,061 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/info/ba680851f3404d869342df68f3bc2d8c is 193, key is TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a./info:regioninfo/1732135210047/Put/seqid=0 2024-11-20T20:40:10,072 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,074 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4b96a5b3a18a8b5af1cb54b54643d45a#info#compaction#68 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T20:40:10,074 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,074 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/611cfea37d114275acdffdfb8c63caa1 is 1080, key is row0062/info:/1732135207122/Put/seqid=0 2024-11-20T20:40:10,074 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,074 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,077 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,078 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened cbb091bbe0cb6e3e9160a6fb0a0a4f3e; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710835, jitterRate=-0.09612676501274109}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T20:40:10,078 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cbb091bbe0cb6e3e9160a6fb0a0a4f3e 2024-11-20T20:40:10,078 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for cbb091bbe0cb6e3e9160a6fb0a0a4f3e: Running coprocessor pre-open hook at 1732135210047Writing region info on filesystem at 1732135210047Initializing all the Stores at 1732135210048 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135210048Cleaning up temporary data from old regions at 1732135210074 (+26 ms)Running coprocessor post-open hooks at 1732135210078 (+4 ms)Region opened successfully at 1732135210078 2024-11-20T20:40:10,079 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e., pid=12, masterSystemTime=1732135209877 2024-11-20T20:40:10,079 DEBUG [RS:0;d7d59561a64e:39133-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-20T20:40:10,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 
is added to blk_1073741853_1029 (size=9882) 2024-11-20T20:40:10,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741853_1029 (size=9882) 2024-11-20T20:40:10,080 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store cbb091bbe0cb6e3e9160a6fb0a0a4f3e:info, priority=-2147483648, current under compaction store size is 2 2024-11-20T20:40:10,080 INFO [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e. 2024-11-20T20:40:10,080 DEBUG [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.HStore(1541): cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info is initiating minor compaction (all files) 2024-11-20T20:40:10,080 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T20:40:10,080 INFO [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info in TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e. 2024-11-20T20:40:10,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/info/ba680851f3404d869342df68f3bc2d8c 2024-11-20T20:40:10,081 INFO [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd->hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220-bottom] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/.tmp, totalSize=86.3 K 2024-11-20T20:40:10,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741854_1030 (size=42984) 2024-11-20T20:40:10,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741854_1030 (size=42984) 2024-11-20T20:40:10,081 DEBUG [RS:0;d7d59561a64e:39133-longCompactions-0 {}] compactions.Compactor(225): Compacting 519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732135204975 2024-11-20T20:40:10,083 DEBUG [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e. 
2024-11-20T20:40:10,083 INFO [RS_OPEN_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e. 2024-11-20T20:40:10,084 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, regionState=OPEN, openSeqNum=131, regionLocation=d7d59561a64e,39133,1732135193457 2024-11-20T20:40:10,086 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41021 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=d7d59561a64e,39133,1732135193457, table=TestLogRolling-testLogRolling, region=cbb091bbe0cb6e3e9160a6fb0a0a4f3e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-20T20:40:10,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure cbb091bbe0cb6e3e9160a6fb0a0a4f3e, server=d7d59561a64e,39133,1732135193457 because future has completed 2024-11-20T20:40:10,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-20T20:40:10,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure cbb091bbe0cb6e3e9160a6fb0a0a4f3e, server=d7d59561a64e,39133,1732135193457 in 363 msec 2024-11-20T20:40:10,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-20T20:40:10,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, ASSIGN in 523 msec 2024-11-20T20:40:10,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b0aa728e838aa4bdcbfc7aeb3ad338bd, daughterA=cbb091bbe0cb6e3e9160a6fb0a0a4f3e, daughterB=4b96a5b3a18a8b5af1cb54b54643d45a in 872 msec 2024-11-20T20:40:10,104 INFO [RS:0;d7d59561a64e:39133-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbb091bbe0cb6e3e9160a6fb0a0a4f3e#info#compaction#69 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T20:40:10,106 DEBUG [RS:0;d7d59561a64e:39133-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/.tmp/info/b7c69660134f47c89697b54b7293a6cf is 1080, key is row0001/info:/1732135204975/Put/seqid=0 2024-11-20T20:40:10,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/ns/1b3c5f6bf558495492581aa91955c9d3 is 43, key is default/ns:d/1732135194857/Put/seqid=0 2024-11-20T20:40:10,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741856_1032 (size=5153) 2024-11-20T20:40:10,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741855_1031 (size=70862) 2024-11-20T20:40:10,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741855_1031 (size=70862) 2024-11-20T20:40:10,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741856_1032 (size=5153) 2024-11-20T20:40:10,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/ns/1b3c5f6bf558495492581aa91955c9d3 2024-11-20T20:40:10,119 DEBUG [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/.tmp/info/b7c69660134f47c89697b54b7293a6cf as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info/b7c69660134f47c89697b54b7293a6cf 2024-11-20T20:40:10,125 INFO [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info of cbb091bbe0cb6e3e9160a6fb0a0a4f3e into b7c69660134f47c89697b54b7293a6cf(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T20:40:10,125 DEBUG [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for cbb091bbe0cb6e3e9160a6fb0a0a4f3e: 2024-11-20T20:40:10,125 INFO [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e., storeName=cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info, priority=15, startTime=1732135210079; duration=0sec 2024-11-20T20:40:10,126 DEBUG [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T20:40:10,126 DEBUG [RS:0;d7d59561a64e:39133-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbb091bbe0cb6e3e9160a6fb0a0a4f3e:info 2024-11-20T20:40:10,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/table/017a1f4fc3e8498ab4ce352e7f57876d is 65, key is TestLogRolling-testLogRolling/table:state/1732135195279/Put/seqid=0 2024-11-20T20:40:10,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741857_1033 (size=5340) 2024-11-20T20:40:10,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741857_1033 (size=5340) 2024-11-20T20:40:10,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/table/017a1f4fc3e8498ab4ce352e7f57876d 2024-11-20T20:40:10,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/info/ba680851f3404d869342df68f3bc2d8c as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/info/ba680851f3404d869342df68f3bc2d8c 2024-11-20T20:40:10,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/info/ba680851f3404d869342df68f3bc2d8c, entries=30, sequenceid=17, filesize=9.7 K 2024-11-20T20:40:10,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/ns/1b3c5f6bf558495492581aa91955c9d3 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/ns/1b3c5f6bf558495492581aa91955c9d3 2024-11-20T20:40:10,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/ns/1b3c5f6bf558495492581aa91955c9d3, entries=2, sequenceid=17, filesize=5.0 K 2024-11-20T20:40:10,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/table/017a1f4fc3e8498ab4ce352e7f57876d as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/table/017a1f4fc3e8498ab4ce352e7f57876d 2024-11-20T20:40:10,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/table/017a1f4fc3e8498ab4ce352e7f57876d, entries=2, sequenceid=17, filesize=5.2 K 2024-11-20T20:40:10,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 117ms, sequenceid=17, compaction requested=false 2024-11-20T20:40:10,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T20:40:10,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:40:10,487 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/611cfea37d114275acdffdfb8c63caa1 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/611cfea37d114275acdffdfb8c63caa1 2024-11-20T20:40:10,493 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4b96a5b3a18a8b5af1cb54b54643d45a/info of 4b96a5b3a18a8b5af1cb54b54643d45a into 611cfea37d114275acdffdfb8c63caa1(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T20:40:10,493 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4b96a5b3a18a8b5af1cb54b54643d45a: 2024-11-20T20:40:10,493 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., storeName=4b96a5b3a18a8b5af1cb54b54643d45a/info, priority=13, startTime=1732135210039; duration=0sec 2024-11-20T20:40:10,493 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T20:40:10,493 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4b96a5b3a18a8b5af1cb54b54643d45a:info 2024-11-20T20:40:10,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:10,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:11,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:11,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50792 deadline: 1732135221200, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd. is not online on d7d59561a64e,39133,1732135193457 2024-11-20T20:40:11,229 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., hostname=d7d59561a64e,39133,1732135193457, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., hostname=d7d59561a64e,39133,1732135193457, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd. 
is not online on d7d59561a64e,39133,1732135193457 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T20:40:11,230 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., hostname=d7d59561a64e,39133,1732135193457, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd. is not online on d7d59561a64e,39133,1732135193457 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T20:40:11,230 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732135194896.b0aa728e838aa4bdcbfc7aeb3ad338bd., hostname=d7d59561a64e,39133,1732135193457, seqNum=2 from cache 2024-11-20T20:40:11,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:11,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
[traces elided: each retry below fails with the same java.lang.reflect.InvocationTargetException caused by java.io.IOException: Filesystem closed as the stack above; only the WARN header of each retry is kept]
2024-11-20T20:40:11,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:12,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:12,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:12,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:12,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:13,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:13,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:13,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:13,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:14,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:14,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
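The roughly one-second spacing of these retries, cycling over the same four WAL files, matches the polling loop implied by the frames (recoverDFSFileLease repeatedly calling isFileClosed). A rough sketch of that loop follows; the pacing constant and the class name LeaseRecoveryLoop are assumptions drawn from the timestamps, not the RecoverLeaseFSUtils source.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoveryLoop {
        static void waitForLeaseRecovery(DistributedFileSystem dfs, Path wal)
                throws Exception {
            boolean recovered = dfs.recoverLease(wal); // asks the NameNode to start recovery
            while (!recovered) {
                Thread.sleep(1000L); // matches the ~1 s retry cadence in the log above
                // Throws java.io.IOException("Filesystem closed") once the DFSClient
                // is shut down, which is exactly the cause chained in the traces.
                recovered = dfs.isFileClosed(wal);
            }
        }
    }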
2024-11-20T20:40:14,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[previous warning repeated 17 more times between 20:40:14,447 and 20:40:14,495]
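These metric-collection warnings look like a shutdown race: the HBase-Metrics2-1 poller is still sampling a DataNode whose dataset has already nulled its executors map, so every poll fails with the NullPointerException quoted in the message. An illustrative Java reduction of that failure mode and its usual guard follows; the class VolumeMetrics and its field are hypothetical stand-ins, not the Hadoop FsDatasetImpl source.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;

    public final class VolumeMetrics {
        // Nulled out on shutdown while the metrics thread may still be polling.
        private volatile Map<String, ExecutorService> executors = new ConcurrentHashMap<>();

        // Called periodically by the metrics system, even after shutdown() has run.
        public long volumeCount() {
            Map<String, ExecutorService> snapshot = executors;
            if (snapshot == null) {
                return 0L; // guard: report a neutral value instead of throwing
            }
            return snapshot.values().size(); // without the guard: the NPE in the log
        }

        public void shutdown() {
            executors = null; // races with volumeCount(): the logged failure mode
        }
    }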
2024-11-20T20:40:14,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:14,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:15,005 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T20:40:15,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[previous warning repeated 17 more times between 20:40:15,006 and 20:40:15,056]
2024-11-20T20:40:15,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:15,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:15,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:15,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:16,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:16,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:16,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:16,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:17,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:17,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-11-20T20:40:17,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:17,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:18,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:18,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:18,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:18,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:19,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:19,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:19,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:19,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:20,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:20,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:20,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:20,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:21,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null
2024-11-20T20:40:21,291 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., hostname=d7d59561a64e,39133,1732135193457, seqNum=131]
2024-11-20T20:40:21,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:21,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T20:40:21,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/766ac13028284186acaef3a157fec8af is 1080, key is row0097/info:/1732135221292/Put/seqid=0
2024-11-20T20:40:21,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741858_1034 (size=12516)
2024-11-20T20:40:21,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741858_1034 (size=12516)
2024-11-20T20:40:21,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/766ac13028284186acaef3a157fec8af
2024-11-20T20:40:21,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/766ac13028284186acaef3a157fec8af as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/766ac13028284186acaef3a157fec8af
2024-11-20T20:40:21,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/766ac13028284186acaef3a157fec8af, entries=7, sequenceid=141, filesize=12.2 K
2024-11-20T20:40:21,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 4b96a5b3a18a8b5af1cb54b54643d45a in 28ms, sequenceid=141, compaction requested=false
2024-11-20T20:40:21,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:21,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:21,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-20T20:40:21,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/c65adb3fc7fd476b85005bf021f022a4 is 1080, key is row0104/info:/1732135221304/Put/seqid=0
2024-11-20T20:40:21,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741859_1035 (size=20078)
2024-11-20T20:40:21,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741859_1035 (size=20078)
2024-11-20T20:40:21,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/c65adb3fc7fd476b85005bf021f022a4
2024-11-20T20:40:21,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/c65adb3fc7fd476b85005bf021f022a4 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/c65adb3fc7fd476b85005bf021f022a4
2024-11-20T20:40:21,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/c65adb3fc7fd476b85005bf021f022a4, entries=14, sequenceid=158, filesize=19.6 K
2024-11-20T20:40:21,359 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 4b96a5b3a18a8b5af1cb54b54643d45a in 27ms, sequenceid=158, compaction requested=true
2024-11-20T20:40:21,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:21,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4b96a5b3a18a8b5af1cb54b54643d45a:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:40:21,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:21,359 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:21,365 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75578 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:40:21,365 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): 4b96a5b3a18a8b5af1cb54b54643d45a/info is initiating minor compaction (all files)
2024-11-20T20:40:21,365 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4b96a5b3a18a8b5af1cb54b54643d45a/info in TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:21,365 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/611cfea37d114275acdffdfb8c63caa1, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/766ac13028284186acaef3a157fec8af, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/c65adb3fc7fd476b85005bf021f022a4] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp, totalSize=73.8 K
2024-11-20T20:40:21,365 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 611cfea37d114275acdffdfb8c63caa1, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732135207122
2024-11-20T20:40:21,366 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 766ac13028284186acaef3a157fec8af, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732135221292
2024-11-20T20:40:21,366 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting c65adb3fc7fd476b85005bf021f022a4, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732135221304
2024-11-20T20:40:21,382 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4b96a5b3a18a8b5af1cb54b54643d45a#info#compaction#74 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:40:21,382 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/de2895624bdf418484f32d96215f979c is 1080, key is row0062/info:/1732135207122/Put/seqid=0
2024-11-20T20:40:21,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741860_1036 (size=65792)
2024-11-20T20:40:21,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741860_1036 (size=65792)
2024-11-20T20:40:21,394 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/de2895624bdf418484f32d96215f979c as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/de2895624bdf418484f32d96215f979c
2024-11-20T20:40:21,399 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4b96a5b3a18a8b5af1cb54b54643d45a/info of 4b96a5b3a18a8b5af1cb54b54643d45a into de2895624bdf418484f32d96215f979c(size=64.3 K), total size for store is 64.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:40:21,399 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:21,399 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., storeName=4b96a5b3a18a8b5af1cb54b54643d45a/info, priority=13, startTime=1732135221359; duration=0sec
2024-11-20T20:40:21,399 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:21,399 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4b96a5b3a18a8b5af1cb54b54643d45a:info
2024-11-20T20:40:21,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:21,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:21,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:22,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:22,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:22,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:22,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:23,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
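The traces above all share one failure mode: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed through reflection (hence the Method.invoke frames), and because the test has already shut down the DFS client, every probe dies in DFSClient.checkOpen with "Filesystem closed", surfacing as the InvocationTargetException logged at RecoverLeaseFSUtils(258). A minimal sketch of such a reflective probe, assuming only the public Hadoop client API (an illustration, not the HBase source; IsFileClosedProbe is a made-up name):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  // Returns true only when the NameNode reports the file closed;
  // false covers both "not closed yet" and "probe failed".
  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      // Looked up reflectively because isFileClosed exists only on DistributedFileSystem.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return false; // filesystem without isFileClosed: caller falls back to retrying recoverLease
    } catch (IllegalAccessException | InvocationTargetException e) {
      // Once FileSystem.close() has run, DFSClient.checkOpen() throws
      // IOException("Filesystem closed"); it lands here as e.getCause(),
      // which is exactly the "Caused by" chain in the traces above.
      return false;
    }
  }
}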
2024-11-20T20:40:23,269 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4. See HBASE-27595 for details.
2024-11-20T20:40:23,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:23,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-20T20:40:23,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/0c980692600f4daba3140170f4d9f1da is 1080, key is row0118/info:/1732135221333/Put/seqid=0
2024-11-20T20:40:23,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741861_1037 (size=17906)
2024-11-20T20:40:23,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741861_1037 (size=17906)
2024-11-20T20:40:23,366 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/0c980692600f4daba3140170f4d9f1da
2024-11-20T20:40:23,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/0c980692600f4daba3140170f4d9f1da as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/0c980692600f4daba3140170f4d9f1da
2024-11-20T20:40:23,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/0c980692600f4daba3140170f4d9f1da, entries=12, sequenceid=174, filesize=17.5 K
2024-11-20T20:40:23,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for 4b96a5b3a18a8b5af1cb54b54643d45a in 25ms, sequenceid=174, compaction requested=false
2024-11-20T20:40:23,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
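The Committing/Added pair in the flush above shows the commit pattern: the flusher writes the new HFile under the region's .tmp directory and then renames it into the store directory, so readers never observe a partially written file. A hedged sketch of that final step using the stock FileSystem API (TmpThenRename and commit are illustrative names, not HBase identifiers):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRename {
  // Move a fully written .tmp file into the store directory; HDFS rename is
  // atomic, so the file becomes visible to readers all at once or not at all.
  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}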
2024-11-20T20:40:23,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:23,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB
2024-11-20T20:40:23,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/96a3efa13ad64b6aae62676bafcd367a is 1080, key is row0130/info:/1732135223355/Put/seqid=0
2024-11-20T20:40:23,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741862_1038 (size=13594)
2024-11-20T20:40:23,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741862_1038 (size=13594)
2024-11-20T20:40:23,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/96a3efa13ad64b6aae62676bafcd367a
2024-11-20T20:40:23,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/96a3efa13ad64b6aae62676bafcd367a as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/96a3efa13ad64b6aae62676bafcd367a
2024-11-20T20:40:23,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:23,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/96a3efa13ad64b6aae62676bafcd367a, entries=8, sequenceid=185, filesize=13.3 K
2024-11-20T20:40:23,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=15.76 KB/16140 for 4b96a5b3a18a8b5af1cb54b54643d45a in 39ms, sequenceid=185, compaction requested=true
2024-11-20T20:40:23,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:23,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4b96a5b3a18a8b5af1cb54b54643d45a:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:40:23,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:23,418 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:23,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:23,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-20T20:40:23,420 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 97292 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:40:23,420 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): 4b96a5b3a18a8b5af1cb54b54643d45a/info is initiating minor compaction (all files)
2024-11-20T20:40:23,420 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4b96a5b3a18a8b5af1cb54b54643d45a/info in TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
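The ExploringCompactionPolicy(116) line above counts candidate permutations that are "in ratio". The usual ratio test is that no single file may be larger than the summed size of the other candidates times the configured ratio; a sketch of just that arithmetic (simplified: the real policy also enumerates start/size permutations and honors min/max file counts, and the ratio configured in this run is not visible in the log):

import java.util.List;

public class RatioCheck { // illustrative name, not the HBase class
  // "In ratio": no file exceeds ratio * (sum of the other files' sizes).
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes implied by the "3 files of size 97292" selection above:
    // 97292 = 65792 + 17906 + 13594 bytes (64.3 K + 17.5 K + 13.3 K).
    List<Long> sizes = List.of(65_792L, 17_906L, 13_594L);
    System.out.println(filesInRatio(sizes, 1.2)); // false: 65792 > 1.2 * 31500
    System.out.println(filesInRatio(sizes, 5.0)); // true:  65792 <= 5.0 * 31500
  }
}

Since the log reports this permutation as in ratio, the effective ratio in this run is evidently larger than the common 1.2 default (compare the 5.0 off-peak default), or the selection was forced once the policy considered itself stuck.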
2024-11-20T20:40:23,420 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/de2895624bdf418484f32d96215f979c, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/0c980692600f4daba3140170f4d9f1da, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/96a3efa13ad64b6aae62676bafcd367a] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp, totalSize=95.0 K
2024-11-20T20:40:23,420 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting de2895624bdf418484f32d96215f979c, keycount=56, bloomtype=ROW, size=64.3 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732135207122
2024-11-20T20:40:23,421 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c980692600f4daba3140170f4d9f1da, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732135221333
2024-11-20T20:40:23,421 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96a3efa13ad64b6aae62676bafcd367a, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732135223355
2024-11-20T20:40:23,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/1b38bffdc8bc486390a31b4b62f40bef is 1080, key is row0138/info:/1732135223381/Put/seqid=0
2024-11-20T20:40:23,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741863_1039 (size=22238)
2024-11-20T20:40:23,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741863_1039 (size=22238)
2024-11-20T20:40:23,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/1b38bffdc8bc486390a31b4b62f40bef
2024-11-20T20:40:23,442 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4b96a5b3a18a8b5af1cb54b54643d45a#info#compaction#78 average throughput is 26.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:40:23,443 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/82d77e6e55074a22b6382b14252bd903 is 1080, key is row0062/info:/1732135207122/Put/seqid=0
2024-11-20T20:40:23,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/1b38bffdc8bc486390a31b4b62f40bef as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/1b38bffdc8bc486390a31b4b62f40bef
2024-11-20T20:40:23,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/1b38bffdc8bc486390a31b4b62f40bef, entries=16, sequenceid=204, filesize=21.7 K
2024-11-20T20:40:23,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741864_1040 (size=87527)
2024-11-20T20:40:23,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=7.36 KB/7532 for 4b96a5b3a18a8b5af1cb54b54643d45a in 31ms, sequenceid=204, compaction requested=false
2024-11-20T20:40:23,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:23,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741864_1040 (size=87527)
2024-11-20T20:40:23,457 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/82d77e6e55074a22b6382b14252bd903 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/82d77e6e55074a22b6382b14252bd903
2024-11-20T20:40:23,465 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4b96a5b3a18a8b5af1cb54b54643d45a/info of 4b96a5b3a18a8b5af1cb54b54643d45a into 82d77e6e55074a22b6382b14252bd903(size=85.5 K), total size for store is 107.2 K. This selection was in queue for 0sec, and took 0sec to execute.
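The throttle.PressureAwareThroughputController(145) lines report an average throughput, how often the writer slept, and the current limit (50.00 MB/second here). A toy limiter reproducing that accounting (ThroughputLimiter and its members are made-up names for illustration; the real controller also raises and lowers the limit with store pressure):

public class ThroughputLimiter {
  private final double limitBytesPerSec;
  private long bytesWritten;
  private final long startNanos = System.nanoTime();

  ThroughputLimiter(double limitBytesPerSec) {
    this.limitBytesPerSec = limitBytesPerSec;
  }

  // Call after each chunk is written: sleep just long enough that
  // bytesWritten / elapsed never exceeds the configured limit.
  void onWrite(long bytes) throws InterruptedException {
    bytesWritten += bytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minSecondsForBytes = bytesWritten / limitBytesPerSec;
    if (minSecondsForBytes > elapsedSec) {
      Thread.sleep((long) ((minSecondsForBytes - elapsedSec) * 1000)); // counted as "slept N time(s)"
    }
  }

  // The "average throughput is X MB/second" figure.
  double averageThroughputMBps() {
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    return bytesWritten / elapsedSec / (1024 * 1024);
  }
}

With compactions as small as the ones above, the writer finishes below the limit, which is why the log shows "slept 0 time(s)".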
2024-11-20T20:40:23,465 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:23,465 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., storeName=4b96a5b3a18a8b5af1cb54b54643d45a/info, priority=13, startTime=1732135223418; duration=0sec
2024-11-20T20:40:23,466 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:23,466 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4b96a5b3a18a8b5af1cb54b54643d45a:info
2024-11-20T20:40:23,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:23,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:24,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:24,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:24,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:24,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:25,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:25,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
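Each of the WARNs above is one pass of a lease-recovery loop: recoverLease is retried roughly once a second, with an isFileClosed probe between attempts, until the NameNode reports the WAL closed or a timeout expires. Here the probe itself keeps throwing because the DFS client is already closed, so the same WARN repeats about a second apart per WAL file. A simplified sketch of such a loop (the timeout and pacing are illustrative, not the exact HBase logic):

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryLoop { // illustrative name
  static void recoverFileLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // recoverLease returns true once the file is closed and the lease is released.
    while (!dfs.recoverLease(wal)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Timed out recovering lease on " + wal);
      }
      Thread.sleep(1000); // matches the ~1 s spacing of the WARNs above
      if (dfs.isFileClosed(wal)) { // this probe is what throws "Filesystem closed" above
        return;
      }
    }
  }
}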
2024-11-20T20:40:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:25,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB
2024-11-20T20:40:25,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/699071ce5443400587e5878833d274a0 is 1080, key is row0154/info:/1732135223420/Put/seqid=0
2024-11-20T20:40:25,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741865_1041 (size=13594)
2024-11-20T20:40:25,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741865_1041 (size=13594)
2024-11-20T20:40:25,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/699071ce5443400587e5878833d274a0
2024-11-20T20:40:25,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/699071ce5443400587e5878833d274a0 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/699071ce5443400587e5878833d274a0
2024-11-20T20:40:25,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/699071ce5443400587e5878833d274a0, entries=8, sequenceid=216, filesize=13.3 K
2024-11-20T20:40:25,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=11.56 KB/11836 for 4b96a5b3a18a8b5af1cb54b54643d45a in 28ms, sequenceid=216, compaction requested=true
2024-11-20T20:40:25,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:25,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4b96a5b3a18a8b5af1cb54b54643d45a:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:40:25,467 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:25,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:25,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:25,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-20T20:40:25,469 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123359 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:40:25,469 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): 4b96a5b3a18a8b5af1cb54b54643d45a/info is initiating minor compaction (all files)
2024-11-20T20:40:25,469 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4b96a5b3a18a8b5af1cb54b54643d45a/info in TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:25,469 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/82d77e6e55074a22b6382b14252bd903, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/1b38bffdc8bc486390a31b4b62f40bef, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/699071ce5443400587e5878833d274a0] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp, totalSize=120.5 K
2024-11-20T20:40:25,469 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82d77e6e55074a22b6382b14252bd903, keycount=76, bloomtype=ROW, size=85.5 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732135207122
2024-11-20T20:40:25,470 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b38bffdc8bc486390a31b4b62f40bef, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732135223381
2024-11-20T20:40:25,470 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 699071ce5443400587e5878833d274a0, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732135223420
2024-11-20T20:40:25,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/73de82d16928457bb8987e01c2bd353d is 1080, key is row0162/info:/1732135225442/Put/seqid=0
2024-11-20T20:40:25,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741866_1042 (size=17906)
2024-11-20T20:40:25,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741866_1042 (size=17906)
2024-11-20T20:40:25,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/73de82d16928457bb8987e01c2bd353d
2024-11-20T20:40:25,492 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4b96a5b3a18a8b5af1cb54b54643d45a#info#compaction#81 average throughput is 34.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:40:25,493 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/2f93fe530c604cdfbec89c48bd4a512d is 1080, key is row0062/info:/1732135207122/Put/seqid=0
2024-11-20T20:40:25,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/73de82d16928457bb8987e01c2bd353d as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/73de82d16928457bb8987e01c2bd353d
2024-11-20T20:40:25,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/73de82d16928457bb8987e01c2bd353d, entries=12, sequenceid=231, filesize=17.5 K
2024-11-20T20:40:25,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 4b96a5b3a18a8b5af1cb54b54643d45a in 41ms, sequenceid=231, compaction requested=false
2024-11-20T20:40:25,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:25,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-11-20T20:40:25,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/ced2c75233514a2ca120b08546ef25c7 is 1080, key is row0174/info:/1732135225469/Put/seqid=0
2024-11-20T20:40:25,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741867_1043 (size=113509)
2024-11-20T20:40:25,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741867_1043 (size=113509)
2024-11-20T20:40:25,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741868_1044 (size=24394)
2024-11-20T20:40:25,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741868_1044 (size=24394)
2024-11-20T20:40:25,528 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/2f93fe530c604cdfbec89c48bd4a512d as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/2f93fe530c604cdfbec89c48bd4a512d
2024-11-20T20:40:25,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/ced2c75233514a2ca120b08546ef25c7
2024-11-20T20:40:25,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/ced2c75233514a2ca120b08546ef25c7 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/ced2c75233514a2ca120b08546ef25c7
2024-11-20T20:40:25,534 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4b96a5b3a18a8b5af1cb54b54643d45a/info of 4b96a5b3a18a8b5af1cb54b54643d45a into 2f93fe530c604cdfbec89c48bd4a512d(size=110.8 K), total size for store is 128.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:40:25,534 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:25,534 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., storeName=4b96a5b3a18a8b5af1cb54b54643d45a/info, priority=13, startTime=1732135225467; duration=0sec
2024-11-20T20:40:25,534 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:25,534 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4b96a5b3a18a8b5af1cb54b54643d45a:info
2024-11-20T20:40:25,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/ced2c75233514a2ca120b08546ef25c7, entries=18, sequenceid=252, filesize=23.8 K
2024-11-20T20:40:25,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for 4b96a5b3a18a8b5af1cb54b54643d45a in 31ms, sequenceid=252, compaction requested=true
2024-11-20T20:40:25,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:25,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4b96a5b3a18a8b5af1cb54b54643d45a:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:40:25,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:25,540 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:25,541 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155809 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:40:25,541 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): 4b96a5b3a18a8b5af1cb54b54643d45a/info is initiating minor compaction (all files)
2024-11-20T20:40:25,541 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4b96a5b3a18a8b5af1cb54b54643d45a/info in TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:25,541 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/2f93fe530c604cdfbec89c48bd4a512d, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/73de82d16928457bb8987e01c2bd353d, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/ced2c75233514a2ca120b08546ef25c7] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp, totalSize=152.2 K
2024-11-20T20:40:25,542 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f93fe530c604cdfbec89c48bd4a512d, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732135207122
2024-11-20T20:40:25,542 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 73de82d16928457bb8987e01c2bd353d, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732135225442
2024-11-20T20:40:25,542 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting ced2c75233514a2ca120b08546ef25c7, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732135225469
2024-11-20T20:40:25,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:25,552 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4b96a5b3a18a8b5af1cb54b54643d45a#info#compaction#83 average throughput is 66.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T20:40:25,553 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/097c311990794b488217e069ae0d9bec is 1080, key is row0062/info:/1732135207122/Put/seqid=0 2024-11-20T20:40:25,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741869_1045 (size=146156) 2024-11-20T20:40:25,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741869_1045 (size=146156) 2024-11-20T20:40:25,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:25,563 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/097c311990794b488217e069ae0d9bec as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/097c311990794b488217e069ae0d9bec 2024-11-20T20:40:25,570 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4b96a5b3a18a8b5af1cb54b54643d45a/info of 4b96a5b3a18a8b5af1cb54b54643d45a into 097c311990794b488217e069ae0d9bec(size=142.7 K), total size for store is 142.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T20:40:25,570 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4b96a5b3a18a8b5af1cb54b54643d45a: 2024-11-20T20:40:25,570 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., storeName=4b96a5b3a18a8b5af1cb54b54643d45a/info, priority=13, startTime=1732135225540; duration=0sec 2024-11-20T20:40:25,570 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T20:40:25,570 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4b96a5b3a18a8b5af1cb54b54643d45a:info 2024-11-20T20:40:26,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:26,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:40:26,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:26,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:27,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:40:27,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:40:27,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a 2024-11-20T20:40:27,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T20:40:27,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/dcd76c5cb2d349c292f4116f94dbf1be is 1080, key is row0192/info:/1732135225510/Put/seqid=0 2024-11-20T20:40:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741870_1046 (size=12522) 2024-11-20T20:40:27,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741870_1046 (size=12522) 2024-11-20T20:40:27,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/dcd76c5cb2d349c292f4116f94dbf1be 2024-11-20T20:40:27,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:27,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/dcd76c5cb2d349c292f4116f94dbf1be as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/dcd76c5cb2d349c292f4116f94dbf1be 2024-11-20T20:40:27,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/dcd76c5cb2d349c292f4116f94dbf1be, entries=7, sequenceid=264, filesize=12.2 K 2024-11-20T20:40:27,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 4b96a5b3a18a8b5af1cb54b54643d45a in 35ms, sequenceid=264, compaction requested=false 2024-11-20T20:40:27,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a: 2024-11-20T20:40:27,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:27,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a 2024-11-20T20:40:27,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-20T20:40:27,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/ce3042c955e54766bb9c358c3092eacb is 1080, key is row0199/info:/1732135227525/Put/seqid=0 2024-11-20T20:40:27,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741871_1047 (size=21171) 2024-11-20T20:40:27,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/ce3042c955e54766bb9c358c3092eacb 2024-11-20T20:40:27,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741871_1047 (size=21171) 2024-11-20T20:40:27,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/ce3042c955e54766bb9c358c3092eacb as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/ce3042c955e54766bb9c358c3092eacb 2024-11-20T20:40:27,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/ce3042c955e54766bb9c358c3092eacb, entries=15, sequenceid=282, filesize=20.7 K 2024-11-20T20:40:27,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 4b96a5b3a18a8b5af1cb54b54643d45a in 30ms, sequenceid=282, compaction requested=true 2024-11-20T20:40:27,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a: 2024-11-20T20:40:27,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4b96a5b3a18a8b5af1cb54b54643d45a:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T20:40:27,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T20:40:27,591 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 
eligible, 16 blocking 2024-11-20T20:40:27,592 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 179849 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T20:40:27,593 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): 4b96a5b3a18a8b5af1cb54b54643d45a/info is initiating minor compaction (all files) 2024-11-20T20:40:27,593 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4b96a5b3a18a8b5af1cb54b54643d45a/info in TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a. 2024-11-20T20:40:27,593 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/097c311990794b488217e069ae0d9bec, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/dcd76c5cb2d349c292f4116f94dbf1be, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/ce3042c955e54766bb9c358c3092eacb] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp, totalSize=175.6 K 2024-11-20T20:40:27,593 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 097c311990794b488217e069ae0d9bec, keycount=130, bloomtype=ROW, size=142.7 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732135207122 2024-11-20T20:40:27,594 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting dcd76c5cb2d349c292f4116f94dbf1be, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732135225510 2024-11-20T20:40:27,594 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting ce3042c955e54766bb9c358c3092eacb, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732135227525 2024-11-20T20:40:27,608 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4b96a5b3a18a8b5af1cb54b54643d45a#info#compaction#86 average throughput is 38.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T20:40:27,608 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/0195d644368544049338c13f07aad0b9 is 1080, key is row0062/info:/1732135207122/Put/seqid=0 2024-11-20T20:40:27,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741872_1048 (size=169999) 2024-11-20T20:40:27,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741872_1048 (size=169999) 2024-11-20T20:40:27,636 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/0195d644368544049338c13f07aad0b9 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/0195d644368544049338c13f07aad0b9 2024-11-20T20:40:27,644 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4b96a5b3a18a8b5af1cb54b54643d45a/info of 4b96a5b3a18a8b5af1cb54b54643d45a into 0195d644368544049338c13f07aad0b9(size=166.0 K), total size for store is 166.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T20:40:27,644 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4b96a5b3a18a8b5af1cb54b54643d45a: 2024-11-20T20:40:27,644 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., storeName=4b96a5b3a18a8b5af1cb54b54643d45a/info, priority=13, startTime=1732135227591; duration=0sec 2024-11-20T20:40:27,644 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T20:40:27,644 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4b96a5b3a18a8b5af1cb54b54643d45a:info 2024-11-20T20:40:28,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:28,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:28,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:28,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:29,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:29,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:29,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:29,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:29,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-20T20:40:29,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/4ba1f56b9b3747e0bbe392ef07224b71 is 1080, key is row0214/info:/1732135227562/Put/seqid=0
2024-11-20T20:40:29,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741873_1049 (size=17918)
2024-11-20T20:40:29,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741873_1049 (size=17918)
2024-11-20T20:40:29,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4b96a5b3a18a8b5af1cb54b54643d45a, server=d7d59561a64e,39133,1732135193457
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-20T20:40:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50792 deadline: 1732135239624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4b96a5b3a18a8b5af1cb54b54643d45a, server=d7d59561a64e,39133,1732135193457
2024-11-20T20:40:29,626 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., hostname=d7d59561a64e,39133,1732135193457, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., hostname=d7d59561a64e,39133,1732135193457, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4b96a5b3a18a8b5af1cb54b54643d45a, server=d7d59561a64e,39133,1732135193457
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-20T20:40:29,626 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., hostname=d7d59561a64e,39133,1732135193457, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4b96a5b3a18a8b5af1cb54b54643d45a, server=d7d59561a64e,39133,1732135193457
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-20T20:40:29,626 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., hostname=d7d59561a64e,39133,1732135193457, seqNum=131 because the exception is null or not the one we care about
2024-11-20T20:40:30,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/4ba1f56b9b3747e0bbe392ef07224b71
2024-11-20T20:40:30,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/4ba1f56b9b3747e0bbe392ef07224b71 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/4ba1f56b9b3747e0bbe392ef07224b71
2024-11-20T20:40:30,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/4ba1f56b9b3747e0bbe392ef07224b71, entries=12, sequenceid=298, filesize=17.5 K
2024-11-20T20:40:30,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 4b96a5b3a18a8b5af1cb54b54643d45a in 428ms, sequenceid=298, compaction requested=false
2024-11-20T20:40:30,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:30,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T20:40:30,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:30,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:30,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:31,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:31,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:31,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:31,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:32,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:32,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:32,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:32,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:33,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:33,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:33,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:33,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:34,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:34,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:34,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:34,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:35,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:35,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:35,530 INFO [master/d7d59561a64e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-20T20:40:35,530 INFO [master/d7d59561a64e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-20T20:40:35,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:35,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:36,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:36,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:36,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:36,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:37,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:37,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:37,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:37,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:37,713 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=71, reuseRatio=88.75% 2024-11-20T20:40:37,714 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-20T20:40:38,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:40:38,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:38,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:38,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:40:39,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:39,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:39,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:40:39,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-20T20:40:39,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:39,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-11-20T20:40:39,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/8e3acc3c2fa04db3b4bbedd0ba905a7b is 1080, key is row0226/info:/1732135229590/Put/seqid=0
2024-11-20T20:40:39,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741874_1050 (size=24412)
2024-11-20T20:40:39,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741874_1050 (size=24412)
2024-11-20T20:40:39,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/8e3acc3c2fa04db3b4bbedd0ba905a7b
2024-11-20T20:40:39,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/8e3acc3c2fa04db3b4bbedd0ba905a7b as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/8e3acc3c2fa04db3b4bbedd0ba905a7b
2024-11-20T20:40:39,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/8e3acc3c2fa04db3b4bbedd0ba905a7b, entries=18, sequenceid=319, filesize=23.8 K
2024-11-20T20:40:39,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=10.51 KB/10760 for 4b96a5b3a18a8b5af1cb54b54643d45a in 24ms, sequenceid=319, compaction requested=true
2024-11-20T20:40:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4b96a5b3a18a8b5af1cb54b54643d45a:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T20:40:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:39,665 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T20:40:39,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39133 {}] regionserver.HRegion(8855): Flush requested on 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:39,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-20T20:40:39,666 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 212329 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T20:40:39,666 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1541): 4b96a5b3a18a8b5af1cb54b54643d45a/info is initiating minor compaction (all files)
2024-11-20T20:40:39,666 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4b96a5b3a18a8b5af1cb54b54643d45a/info in TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:39,667 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/0195d644368544049338c13f07aad0b9, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/4ba1f56b9b3747e0bbe392ef07224b71, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/8e3acc3c2fa04db3b4bbedd0ba905a7b] into tmpdir=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp, totalSize=207.4 K
2024-11-20T20:40:39,667 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0195d644368544049338c13f07aad0b9, keycount=152, bloomtype=ROW, size=166.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732135207122
2024-11-20T20:40:39,667 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ba1f56b9b3747e0bbe392ef07224b71, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732135227562
2024-11-20T20:40:39,667 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e3acc3c2fa04db3b4bbedd0ba905a7b, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732135229590
2024-11-20T20:40:39,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/12c9678b84b14c8a8a35d37094586772 is 1080, key is row0244/info:/1732135239643/Put/seqid=0
2024-11-20T20:40:39,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741875_1051 (size=16839)
2024-11-20T20:40:39,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741875_1051 (size=16839)
2024-11-20T20:40:39,678 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/12c9678b84b14c8a8a35d37094586772
2024-11-20T20:40:39,683 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4b96a5b3a18a8b5af1cb54b54643d45a#info#compaction#90 average throughput is 46.69 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T20:40:39,683 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/1a2c50017c164af0be84881209b2f99d is 1080, key is row0062/info:/1732135207122/Put/seqid=0
2024-11-20T20:40:39,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/12c9678b84b14c8a8a35d37094586772 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/12c9678b84b14c8a8a35d37094586772
2024-11-20T20:40:39,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741876_1052 (size=202479)
2024-11-20T20:40:39,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741876_1052 (size=202479)
2024-11-20T20:40:39,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/12c9678b84b14c8a8a35d37094586772, entries=11, sequenceid=333, filesize=16.4 K
2024-11-20T20:40:39,689 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/1a2c50017c164af0be84881209b2f99d as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/1a2c50017c164af0be84881209b2f99d
2024-11-20T20:40:39,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for 4b96a5b3a18a8b5af1cb54b54643d45a in 24ms, sequenceid=333, compaction requested=false
2024-11-20T20:40:39,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:39,695 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4b96a5b3a18a8b5af1cb54b54643d45a/info of 4b96a5b3a18a8b5af1cb54b54643d45a into 1a2c50017c164af0be84881209b2f99d(size=197.7 K), total size for store is 214.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T20:40:39,695 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:39,695 INFO [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a., storeName=4b96a5b3a18a8b5af1cb54b54643d45a/info, priority=13, startTime=1732135239665; duration=0sec
2024-11-20T20:40:39,695 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T20:40:39,695 DEBUG [RS:0;d7d59561a64e:39133-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4b96a5b3a18a8b5af1cb54b54643d45a:info
2024-11-20T20:40:39,793 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375
2024-11-20T20:40:40,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-20T20:40:40,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:40,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
2024-11-20T20:40:40,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
2024-11-20T20:40:41,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
2024-11-20T20:40:41,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
2024-11-20T20:40:41,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
11 more 2024-11-20T20:40:41,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-20T20:40:41,670 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-11-20T20:40:41,671 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39133%2C1732135193457.1732135241670
2024-11-20T20:40:41,692 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,692 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,693 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,693 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,693 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,693 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135194316 with entries=315, filesize=309.33 KB; new WAL /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135241670
2024-11-20T20:40:41,694 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37349:37349),(127.0.0.1/127.0.0.1:40741:40741)]
2024-11-20T20:40:41,694 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135194316 is not closed yet, will try archiving it next time
2024-11-20T20:40:41,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741833_1009 (size=316763)
2024-11-20T20:40:41,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741833_1009 (size=316763)
2024-11-20T20:40:41,708 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for cbb091bbe0cb6e3e9160a6fb0a0a4f3e:
2024-11-20T20:40:41,708 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB
2024-11-20T20:40:41,714 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/info/676f7d0b432d42d8ba161456c472c399 is 186, key is TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e./info:regioninfo/1732135210084/Put/seqid=0
2024-11-20T20:40:41,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741878_1054 (size=6153)
2024-11-20T20:40:41,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741878_1054 (size=6153)
2024-11-20T20:40:41,722 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/info/676f7d0b432d42d8ba161456c472c399
2024-11-20T20:40:41,730 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/.tmp/info/676f7d0b432d42d8ba161456c472c399 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/info/676f7d0b432d42d8ba161456c472c399
2024-11-20T20:40:41,736 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/info/676f7d0b432d42d8ba161456c472c399, entries=5, sequenceid=21, filesize=6.0 K
2024-11-20T20:40:41,738 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=21, compaction requested=false
2024-11-20T20:40:41,738 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-20T20:40:41,738 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4b96a5b3a18a8b5af1cb54b54643d45a 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB
2024-11-20T20:40:41,742 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/01f3076fb54e4f7bb54163fc7e899623 is 1080, key is row0255/info:/1732135239667/Put/seqid=0
2024-11-20T20:40:41,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741879_1055 (size=7116)
2024-11-20T20:40:41,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741879_1055 (size=7116)
2024-11-20T20:40:41,746 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/01f3076fb54e4f7bb54163fc7e899623
2024-11-20T20:40:41,750 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/.tmp/info/01f3076fb54e4f7bb54163fc7e899623 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/01f3076fb54e4f7bb54163fc7e899623
2024-11-20T20:40:41,754 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/01f3076fb54e4f7bb54163fc7e899623, entries=2, sequenceid=339, filesize=6.9 K
2024-11-20T20:40:41,755 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 4b96a5b3a18a8b5af1cb54b54643d45a in 17ms, sequenceid=339, compaction requested=true
2024-11-20T20:40:41,755 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4b96a5b3a18a8b5af1cb54b54643d45a:
2024-11-20T20:40:41,755 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C39133%2C1732135193457.1732135241755
2024-11-20T20:40:41,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,760 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,760 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T20:40:41,760 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135241670 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135241755
2024-11-20T20:40:41,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741877_1053 (size=731)
2024-11-20T20:40:41,761 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37349:37349),(127.0.0.1/127.0.0.1:40741:40741)]
2024-11-20T20:40:41,761 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135241670 is not closed yet, will try archiving it next time
2024-11-20T20:40:41,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741877_1053 (size=731)
2024-11-20T20:40:41,761 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135194316 to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/oldWALs/d7d59561a64e%2C39133%2C1732135193457.1732135194316
2024-11-20T20:40:41,762 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-11-20T20:40:41,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-20T20:40:41,762 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-20T20:40:41,762 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
	at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
	at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:40:41,762 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/WALs/d7d59561a64e,39133,1732135193457/d7d59561a64e%2C39133%2C1732135193457.1732135241670 to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/oldWALs/d7d59561a64e%2C39133%2C1732135193457.1732135241670
2024-11-20T20:40:41,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:40:41,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:40:41,762 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-20T20:40:41,763 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-20T20:40:41,763 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1765835762, stopped=false
2024-11-20T20:40:41,763 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d7d59561a64e,41021,1732135193294
2024-11-20T20:40:41,859 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:40:41,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-20T20:40:41,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:40:41,859 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:40:41,860 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-20T20:40:41,860 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-20T20:40:41,860 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
	at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
	at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:40:41,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:40:41,860 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:40:41,860 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd7d59561a64e,39133,1732135193457' *****
2024-11-20T20:40:41,860 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-20T20:40:41,860 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:40:41,860 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-20T20:40:41,860 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(3091): Received CLOSE for cbb091bbe0cb6e3e9160a6fb0a0a4f3e
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(3091): Received CLOSE for 4b96a5b3a18a8b5af1cb54b54643d45a
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(959): stopping server d7d59561a64e,39133,1732135193457
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-20T20:40:41,861 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cbb091bbe0cb6e3e9160a6fb0a0a4f3e, disabling compactions & flushes
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d7d59561a64e:39133.
2024-11-20T20:40:41,861 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.
2024-11-20T20:40:41,861 DEBUG [RS:0;d7d59561a64e:39133 {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
	at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T20:40:41,861 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.
2024-11-20T20:40:41,861 DEBUG [RS:0;d7d59561a64e:39133 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T20:40:41,861 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e. after waiting 0 ms
2024-11-20T20:40:41,861 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-20T20:40:41,861 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close
2024-11-20T20:40:41,861 DEBUG [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1325): Online Regions={cbb091bbe0cb6e3e9160a6fb0a0a4f3e=TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e., 1588230740=hbase:meta,,1.1588230740, 4b96a5b3a18a8b5af1cb54b54643d45a=TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.}
2024-11-20T20:40:41,861 DEBUG [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4b96a5b3a18a8b5af1cb54b54643d45a, cbb091bbe0cb6e3e9160a6fb0a0a4f3e
2024-11-20T20:40:41,861 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-20T20:40:41,861 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-20T20:40:41,861 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-20T20:40:41,862 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-20T20:40:41,862 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-20T20:40:41,861 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd->hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220-bottom] to archive
2024-11-20T20:40:41,862 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-20T20:40:41,864 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd
2024-11-20T20:40:41,864 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d7d59561a64e:41021 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
	at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
	at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
	at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	... 16 more
2024-11-20T20:40:41,864 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: []
2024-11-20T20:40:41,865 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1
2024-11-20T20:40:41,866 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:40:41,866 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-20T20:40:41,866 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135241861Running coprocessor pre-close hooks at 1732135241861Disabling compacts and flushes for region at 1732135241861Disabling writes for close at 1732135241862 (+1 ms)Writing region close event to WAL at 1732135241862Running coprocessor post-close hooks at 1732135241866 (+4 ms)Closed at 1732135241866
2024-11-20T20:40:41,866 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-20T20:40:41,868 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/cbb091bbe0cb6e3e9160a6fb0a0a4f3e/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130
2024-11-20T20:40:41,868 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.
2024-11-20T20:40:41,868 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cbb091bbe0cb6e3e9160a6fb0a0a4f3e: Waiting for close lock at 1732135241861Running coprocessor pre-close hooks at 1732135241861Disabling compacts and flushes for region at 1732135241861Disabling writes for close at 1732135241861Writing region close event to WAL at 1732135241865 (+4 ms)Running coprocessor post-close hooks at 1732135241868 (+3 ms)Closed at 1732135241868
2024-11-20T20:40:41,868 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732135209223.cbb091bbe0cb6e3e9160a6fb0a0a4f3e.
2024-11-20T20:40:41,868 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4b96a5b3a18a8b5af1cb54b54643d45a, disabling compactions & flushes
2024-11-20T20:40:41,868 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:41,868 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:41,868 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a. after waiting 0 ms
2024-11-20T20:40:41,868 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.
2024-11-20T20:40:41,869 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd->hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/b0aa728e838aa4bdcbfc7aeb3ad338bd/info/519ce62031d64053a936145f91240220-top, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-5a77e4f1e96a4f8f9f9e0ce0a7cee28b, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/611cfea37d114275acdffdfb8c63caa1, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/TestLogRolling-testLogRolling=b0aa728e838aa4bdcbfc7aeb3ad338bd-4d4020e57ee8492c8d332e515d2bed11, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/766ac13028284186acaef3a157fec8af, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/de2895624bdf418484f32d96215f979c, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/c65adb3fc7fd476b85005bf021f022a4, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/0c980692600f4daba3140170f4d9f1da, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/82d77e6e55074a22b6382b14252bd903, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/96a3efa13ad64b6aae62676bafcd367a, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/1b38bffdc8bc486390a31b4b62f40bef, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/2f93fe530c604cdfbec89c48bd4a512d, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/699071ce5443400587e5878833d274a0, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/73de82d16928457bb8987e01c2bd353d, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/097c311990794b488217e069ae0d9bec, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/ced2c75233514a2ca120b08546ef25c7, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/dcd76c5cb2d349c292f4116f94dbf1be, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/0195d644368544049338c13f07aad0b9, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/ce3042c955e54766bb9c358c3092eacb, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/4ba1f56b9b3747e0bbe392ef07224b71, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/8e3acc3c2fa04db3b4bbedd0ba905a7b] to archive
2024-11-20T20:40:41,870 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-20T20:40:41,871 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd to hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info/519ce62031d64053a936145f91240220.b0aa728e838aa4bdcbfc7aeb3ad338bd
[20 further backup.HFileArchiver(596) DEBUG entries, 2024-11-20T20:40:41,873 through 2024-11-20T20:40:41,890, archive each of the remaining store files listed above to the matching path under hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/archive/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/info; duplicate entries elided]
2024-11-20T20:40:41,890 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [611cfea37d114275acdffdfb8c63caa1=42984, 766ac13028284186acaef3a157fec8af=12516, de2895624bdf418484f32d96215f979c=65792, c65adb3fc7fd476b85005bf021f022a4=20078, 0c980692600f4daba3140170f4d9f1da=17906, 82d77e6e55074a22b6382b14252bd903=87527, 96a3efa13ad64b6aae62676bafcd367a=13594, 1b38bffdc8bc486390a31b4b62f40bef=22238, 2f93fe530c604cdfbec89c48bd4a512d=113509, 699071ce5443400587e5878833d274a0=13594, 73de82d16928457bb8987e01c2bd353d=17906, 097c311990794b488217e069ae0d9bec=146156, ced2c75233514a2ca120b08546ef25c7=24394, dcd76c5cb2d349c292f4116f94dbf1be=12522, 0195d644368544049338c13f07aad0b9=169999, ce3042c955e54766bb9c358c3092eacb=21171, 4ba1f56b9b3747e0bbe392ef07224b71=17918, 8e3acc3c2fa04db3b4bbedd0ba905a7b=24412]
file=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/data/default/TestLogRolling-testLogRolling/4b96a5b3a18a8b5af1cb54b54643d45a/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=130 2024-11-20T20:40:41,894 INFO [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a. 2024-11-20T20:40:41,894 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4b96a5b3a18a8b5af1cb54b54643d45a: Waiting for close lock at 1732135241868Running coprocessor pre-close hooks at 1732135241868Disabling compacts and flushes for region at 1732135241868Disabling writes for close at 1732135241868Writing region close event to WAL at 1732135241891 (+23 ms)Running coprocessor post-close hooks at 1732135241894 (+3 ms)Closed at 1732135241894 2024-11-20T20:40:41,894 DEBUG [RS_CLOSE_REGION-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732135209223.4b96a5b3a18a8b5af1cb54b54643d45a. 2024-11-20T20:40:42,062 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(976): stopping server d7d59561a64e,39133,1732135193457; all regions closed. 2024-11-20T20:40:42,063 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,063 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,064 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,064 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,065 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741834_1010 (size=8107) 2024-11-20T20:40:42,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741834_1010 (size=8107) 2024-11-20T20:40:42,074 DEBUG [RS:0;d7d59561a64e:39133 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/oldWALs 2024-11-20T20:40:42,074 INFO [RS:0;d7d59561a64e:39133 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C39133%2C1732135193457.meta:.meta(num 1732135194782) 2024-11-20T20:40:42,075 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,075 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,075 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,075 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,075 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741880_1056 (size=780) 2024-11-20T20:40:42,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741880_1056 (size=780) 2024-11-20T20:40:42,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:42,177 INFO [regionserver/d7d59561a64e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:40:42,253 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T20:40:42,254 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T20:40:42,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:42,485 DEBUG [RS:0;d7d59561a64e:39133 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/oldWALs 2024-11-20T20:40:42,485 INFO [RS:0;d7d59561a64e:39133 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C39133%2C1732135193457:(num 1732135241755) 2024-11-20T20:40:42,485 DEBUG [RS:0;d7d59561a64e:39133 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:40:42,485 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:40:42,485 INFO [RS:0;d7d59561a64e:39133 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:40:42,485 INFO [RS:0;d7d59561a64e:39133 {}] hbase.ChoreService(370): Chore service for: regionserver/d7d59561a64e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T20:40:42,486 INFO [RS:0;d7d59561a64e:39133 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:40:42,486 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
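The "Failed invocation" WARNs in this stretch all have the same anatomy: RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed through java.lang.reflect.Method.invoke (presumably because that method is not available on every FileSystem implementation), and since the test's DFSClient has already been shut down, the real IOException("Filesystem closed") comes back wrapped in an InvocationTargetException, exactly as the "Caused by:" chain shows. A minimal, self-contained sketch of why the wrapper appears (the class below is a stand-in, not HBase's actual code):

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Stand-in for a filesystem client whose session is already closed.
class ClosedClient {
    public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed"); // mirrors DFSClient.checkOpen()
    }
}

public class ReflectiveProbe {
    public static void main(String[] args) throws Exception {
        ClosedClient client = new ClosedClient();
        Method m = ClosedClient.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(client, "/some/wal");
        } catch (InvocationTargetException e) {
            // The real failure is nested, matching the log's "Caused by:" chain.
            System.out.println("wrapper: " + e);
            System.out.println("cause:   " + e.getCause());
        }
    }
}
```

During mini-cluster teardown this appears to be expected noise: the WAL close path races the filesystem shutdown, and the reflective probe simply fails with the wrapped "Filesystem closed".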
2024-11-20T20:40:42,486 INFO [RS:0;d7d59561a64e:39133 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39133 2024-11-20T20:40:42,554 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d7d59561a64e,39133,1732135193457 2024-11-20T20:40:42,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:40:42,554 INFO [RS:0;d7d59561a64e:39133 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:40:42,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:42,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:42,575 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d7d59561a64e,39133,1732135193457] 2024-11-20T20:40:42,586 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d7d59561a64e,39133,1732135193457 already deleted, retry=false 2024-11-20T20:40:42,586 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d7d59561a64e,39133,1732135193457 expired; onlineServers=0 2024-11-20T20:40:42,586 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd7d59561a64e,41021,1732135193294' ***** 2024-11-20T20:40:42,586 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T20:40:42,586 INFO [M:0;d7d59561a64e:41021 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T20:40:42,586 INFO [M:0;d7d59561a64e:41021 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:40:42,586 DEBUG [M:0;d7d59561a64e:41021 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T20:40:42,587 DEBUG [M:0;d7d59561a64e:41021 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T20:40:42,587 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
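The expiration sequence just above (NodeDeleted on /hbase/rs/d7d59561a64e,39133,... followed by RegionServerTracker "processing expiration") is the standard ZooKeeper liveness pattern: a region server owns an ephemeral znode tied to its session, and when the session ends, ZooKeeper deletes the node and the master's watch fires. A minimal two-session sketch of that pattern, assuming a local ZooKeeper at a placeholder address (znode path is also a placeholder; connection-ready waits and error handling omitted):

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralWatchSketch {
    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:2181";          // placeholder quorum
        CountDownLatch gone = new CountDownLatch(1);

        // "Master" session: watches the znode the region server will own.
        ZooKeeper master = new ZooKeeper(quorum, 30_000, e -> {});
        // "Region server" session: owns an ephemeral znode tied to this session.
        ZooKeeper rs = new ZooKeeper(quorum, 30_000, e -> {});

        rs.create("/rs-demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
                CreateMode.EPHEMERAL); // stands in for /hbase/rs/<server>,<port>,<startcode>

        // One-shot watch: fires with NodeDeleted once the owning session goes away.
        master.exists("/rs-demo", event -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                System.out.println("region server expired: " + event.getPath());
                gone.countDown();
            }
        });

        rs.close();  // session ends -> ZooKeeper deletes the ephemeral node -> watch fires
        gone.await();
        master.close();
    }
}
```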
2024-11-20T20:40:42,587 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135193823 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135193823,5,FailOnTimeoutGroup] 2024-11-20T20:40:42,587 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135193822 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135193822,5,FailOnTimeoutGroup] 2024-11-20T20:40:42,587 INFO [M:0;d7d59561a64e:41021 {}] hbase.ChoreService(370): Chore service for: master/d7d59561a64e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T20:40:42,587 INFO [M:0;d7d59561a64e:41021 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:40:42,587 DEBUG [M:0;d7d59561a64e:41021 {}] master.HMaster(1795): Stopping service threads 2024-11-20T20:40:42,588 INFO [M:0;d7d59561a64e:41021 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T20:40:42,588 INFO [M:0;d7d59561a64e:41021 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T20:40:42,588 INFO [M:0;d7d59561a64e:41021 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T20:40:42,589 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T20:40:42,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T20:40:42,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:42,597 DEBUG [M:0;d7d59561a64e:41021 {}] zookeeper.ZKUtil(347): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T20:40:42,597 WARN [M:0;d7d59561a64e:41021 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T20:40:42,598 INFO [M:0;d7d59561a64e:41021 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/.lastflushedseqids 2024-11-20T20:40:42,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741881_1057 (size=228) 2024-11-20T20:40:42,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741881_1057 (size=228) 2024-11-20T20:40:42,604 INFO [M:0;d7d59561a64e:41021 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T20:40:42,605 INFO [M:0;d7d59561a64e:41021 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T20:40:42,605 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T20:40:42,605 INFO [M:0;d7d59561a64e:41021 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:42,605 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:42,605 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T20:40:42,605 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:42,605 INFO [M:0;d7d59561a64e:41021 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.44 KB heapSize=63.39 KB 2024-11-20T20:40:42,618 DEBUG [M:0;d7d59561a64e:41021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6ea6348f495244acacd3df3ae52fcab8 is 82, key is hbase:meta,,1/info:regioninfo/1732135194809/Put/seqid=0 2024-11-20T20:40:42,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741882_1058 (size=5672) 2024-11-20T20:40:42,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741882_1058 (size=5672) 2024-11-20T20:40:42,623 INFO [M:0;d7d59561a64e:41021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6ea6348f495244acacd3df3ae52fcab8 2024-11-20T20:40:42,640 DEBUG [M:0;d7d59561a64e:41021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7dddbcb6e345469594f37026c61e32ec is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732135195285/Put/seqid=0 2024-11-20T20:40:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741883_1059 (size=7091) 2024-11-20T20:40:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741883_1059 (size=7091) 2024-11-20T20:40:42,645 INFO [M:0;d7d59561a64e:41021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7dddbcb6e345469594f37026c61e32ec 2024-11-20T20:40:42,648 INFO [M:0;d7d59561a64e:41021 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7dddbcb6e345469594f37026c61e32ec 2024-11-20T20:40:42,662 DEBUG [M:0;d7d59561a64e:41021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7f5750e95d4f472c8d20e51ab5ab3129 is 69, key is d7d59561a64e,39133,1732135193457/rs:state/1732135194079/Put/seqid=0 
2024-11-20T20:40:42,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741884_1060 (size=5156) 2024-11-20T20:40:42,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741884_1060 (size=5156) 2024-11-20T20:40:42,667 INFO [M:0;d7d59561a64e:41021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7f5750e95d4f472c8d20e51ab5ab3129 2024-11-20T20:40:42,675 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:40:42,675 INFO [RS:0;d7d59561a64e:39133 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:40:42,675 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39133-0x10159f986630001, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:40:42,675 INFO [RS:0;d7d59561a64e:39133 {}] regionserver.HRegionServer(1031): Exiting; stopping=d7d59561a64e,39133,1732135193457; zookeeper connection closed. 2024-11-20T20:40:42,676 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@12fb2ff5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@12fb2ff5 2024-11-20T20:40:42,676 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T20:40:42,690 DEBUG [M:0;d7d59561a64e:41021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b9feefe393054f9396eace4ac96b5bb7 is 52, key is load_balancer_on/state:d/1732135194892/Put/seqid=0 2024-11-20T20:40:42,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741885_1061 (size=5056) 2024-11-20T20:40:42,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741885_1061 (size=5056) 2024-11-20T20:40:42,695 INFO [M:0;d7d59561a64e:41021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b9feefe393054f9396eace4ac96b5bb7 2024-11-20T20:40:42,700 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6ea6348f495244acacd3df3ae52fcab8 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6ea6348f495244acacd3df3ae52fcab8 2024-11-20T20:40:42,704 INFO [M:0;d7d59561a64e:41021 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6ea6348f495244acacd3df3ae52fcab8, entries=8, sequenceid=125, filesize=5.5 K 2024-11-20T20:40:42,705 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7dddbcb6e345469594f37026c61e32ec as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7dddbcb6e345469594f37026c61e32ec 2024-11-20T20:40:42,709 INFO [M:0;d7d59561a64e:41021 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7dddbcb6e345469594f37026c61e32ec 2024-11-20T20:40:42,709 INFO [M:0;d7d59561a64e:41021 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7dddbcb6e345469594f37026c61e32ec, entries=13, sequenceid=125, filesize=6.9 K 2024-11-20T20:40:42,710 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7f5750e95d4f472c8d20e51ab5ab3129 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7f5750e95d4f472c8d20e51ab5ab3129 2024-11-20T20:40:42,714 INFO [M:0;d7d59561a64e:41021 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7f5750e95d4f472c8d20e51ab5ab3129, entries=1, sequenceid=125, filesize=5.0 K 2024-11-20T20:40:42,714 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b9feefe393054f9396eace4ac96b5bb7 as hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b9feefe393054f9396eace4ac96b5bb7 2024-11-20T20:40:42,718 INFO [M:0;d7d59561a64e:41021 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43361/user/jenkins/test-data/b971cc34-83a3-d5bc-d68b-47f0f7877e7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b9feefe393054f9396eace4ac96b5bb7, entries=1, sequenceid=125, filesize=4.9 K 2024-11-20T20:40:42,719 INFO [M:0;d7d59561a64e:41021 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=125, compaction requested=false 2024-11-20T20:40:42,720 INFO [M:0;d7d59561a64e:41021 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
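Both the StoreCloser archival at the top of this excerpt and the master-store flush here follow the same publish pattern: write the file somewhere readers do not look, then make it visible with a single filesystem rename. The "Committing .tmp/... as .../info/..." DEBUG lines are that final rename. A minimal sketch against the Hadoop FileSystem API, using the local filesystem and made-up paths as stand-ins for the hdfs:// paths in the log:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRename {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf); // DistributedFileSystem in the log

        Path tmp = new Path("/tmp/store/.tmp/info/abc123"); // staging area
        Path dst = new Path("/tmp/store/info/abc123");      // live store directory

        // 1. Write the new file under .tmp, invisible to readers of the store dir.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.writeBytes("flushed cells would go here");
        }

        // 2. Commit: one rename publishes the file; on HDFS this is atomic.
        fs.mkdirs(dst.getParent());
        if (!fs.rename(tmp, dst)) {
            throw new IOException("commit failed for " + dst);
        }
        System.out.println("committed " + dst + " size=" + fs.getFileStatus(dst).getLen());
    }
}
```

The archive move ("Archived from FileableStoreFile, ...data/... to ...archive/...") is the same rename primitive pointed at the archive directory instead of the live one.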
2024-11-20T20:40:42,720 DEBUG [M:0;d7d59561a64e:41021 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135242605Disabling compacts and flushes for region at 1732135242605Disabling writes for close at 1732135242605Obtaining lock to block concurrent updates at 1732135242605Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732135242605Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52675, getHeapSize=64848, getOffHeapSize=0, getCellsCount=148 at 1732135242605Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732135242606 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732135242606Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732135242618 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732135242618Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732135242627 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732135242639 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732135242639Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732135242648 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732135242662 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732135242662Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732135242672 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732135242689 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732135242689Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5493b46d: reopening flushed file at 1732135242699 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@180e7983: reopening flushed file at 1732135242704 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@718838ad: reopening flushed file at 1732135242709 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c2f42f1: reopening flushed file at 1732135242714 (+5 ms)Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=125, compaction requested=false at 1732135242719 (+5 ms)Writing region close event to WAL at 1732135242720 (+1 ms)Closed at 1732135242720 2024-11-20T20:40:42,721 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,721 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,721 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,721 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,721 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:42,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40355 is added to blk_1073741830_1006 (size=61344) 2024-11-20T20:40:42,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45765 is added to blk_1073741830_1006 (size=61344) 2024-11-20T20:40:42,724 INFO [M:0;d7d59561a64e:41021 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
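The "Region close journal" entries above and just below are timing traces: each step records a wall-clock stamp, and the rendered journal prints the delta from the previous step as "(+N ms)". A small hypothetical recorder (not HBase's actual journal class) that produces the same shape:

```java
import java.util.ArrayList;
import java.util.List;

// Hypothetical helper: records named steps and renders them as
// "step at T (+delta ms)", like the region close journal in the log.
public class StepJournal {
    private record Step(String what, long at) {}
    private final List<Step> steps = new ArrayList<>();

    public void mark(String what) {
        steps.add(new Step(what, System.currentTimeMillis()));
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
            sb.append(s.what()).append(" at ").append(s.at());
            if (prev >= 0 && s.at() > prev) {
                sb.append(" (+").append(s.at() - prev).append(" ms)");
            }
            prev = s.at();
            sb.append('\n');
        }
        return sb.toString();
    }

    public static void main(String[] args) throws InterruptedException {
        StepJournal j = new StepJournal();
        j.mark("Waiting for close lock");
        j.mark("Disabling writes for close");
        Thread.sleep(20); // simulate slow step
        j.mark("Writing region close event to WAL");
        j.mark("Closed");
        System.out.print(j);
    }
}
```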
2024-11-20T20:40:42,724 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T20:40:42,724 INFO [M:0;d7d59561a64e:41021 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41021 2024-11-20T20:40:42,724 INFO [M:0;d7d59561a64e:41021 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:40:42,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:40:42,833 INFO [M:0;d7d59561a64e:41021 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:40:42,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41021-0x10159f986630000, quorum=127.0.0.1:57606, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:40:42,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43273902{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:40:42,836 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51d47a1d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:40:42,836 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:40:42,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65f8aed6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:40:42,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@248d74c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.log.dir/,STOPPED} 2024-11-20T20:40:42,838 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
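Nearly every worker in this shutdown reports the same exit path: the sync.N SyncRunners log "interrupted", the LogRollers log "exiting", the old-WALs cleaner logs "Interrupted while cleaning", and (just below) the datanode command processor "encountered interrupt and exit". These are all instances of the cooperative-interrupt idiom: the owning service interrupts the thread, the blocking call throws InterruptedException, and the loop logs and returns. A self-contained sketch of that idiom:

```java
// Sketch of the interrupt-and-exit idiom behind the "interrupted" / "exiting"
// messages: a periodic worker that ends cleanly when its thread is interrupted.
public class InterruptibleWorker implements Runnable {
    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            try {
                doPeriodicWork();
                Thread.sleep(1_000); // blocking point where the interrupt lands
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the flag for callers
                break;
            }
        }
        System.out.println(Thread.currentThread().getName() + " exiting.");
    }

    private void doPeriodicWork() {
        // e.g. roll a WAL, check compactions, refresh disk usage
    }

    public static void main(String[] args) throws Exception {
        Thread t = new Thread(new InterruptibleWorker(), "demo-chore");
        t.start();
        Thread.sleep(100);
        t.interrupt(); // what the chore service / shutdown hook does to its workers
        t.join();
    }
}
```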
2024-11-20T20:40:42,838 WARN [BP-1587900785-172.17.0.2-1732135191054 heartbeating to localhost/127.0.0.1:43361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:40:42,838 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T20:40:42,838 WARN [BP-1587900785-172.17.0.2-1732135191054 heartbeating to localhost/127.0.0.1:43361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1587900785-172.17.0.2-1732135191054 (Datanode Uuid 27169cd1-b803-48b7-8a75-f42c59c8197a) service to localhost/127.0.0.1:43361 2024-11-20T20:40:42,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/data/data3/current/BP-1587900785-172.17.0.2-1732135191054 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:40:42,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/data/data4/current/BP-1587900785-172.17.0.2-1732135191054 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:40:42,839 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:40:42,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b4813f1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:40:42,841 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ef16bcf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:40:42,841 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:40:42,842 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a67f565{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:40:42,842 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@195069a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.log.dir/,STOPPED} 2024-11-20T20:40:42,843 WARN [BP-1587900785-172.17.0.2-1732135191054 heartbeating to localhost/127.0.0.1:43361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:40:42,843 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T20:40:42,843 WARN [BP-1587900785-172.17.0.2-1732135191054 heartbeating to localhost/127.0.0.1:43361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1587900785-172.17.0.2-1732135191054 (Datanode Uuid ddb82209-bcfa-4a98-8282-cb9ecdb9474f) service to localhost/127.0.0.1:43361 2024-11-20T20:40:42,843 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T20:40:42,844 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/data/data1/current/BP-1587900785-172.17.0.2-1732135191054 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:40:42,844 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/cluster_ef69f12f-21d2-d7d4-1d81-ad28bf38015f/data/data2/current/BP-1587900785-172.17.0.2-1732135191054 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T20:40:42,844 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T20:40:42,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d701b2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T20:40:42,851 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6150e164{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:40:42,851 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:40:42,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@552872f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:40:42,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8d81aab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.log.dir/,STOPPED} 2024-11-20T20:40:42,858 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T20:40:42,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T20:40:42,897 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=234 (was 211) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:43361 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:43361 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43361 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging threads (identical Netty shutdown-wait stack): nioEventLoopGroup-38-1, nioEventLoopGroup-39-1, nioEventLoopGroup-40-3, nioEventLoopGroup-40-2, nioEventLoopGroup-39-2, nioEventLoopGroup-38-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging threads (identical IPC client wait stack): IPC Client (874278612) connection to localhost/127.0.0.1:43361 from jenkins.hfs.6, IPC Client (874278612) connection to localhost/127.0.0.1:43361 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:43361
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread (reported twice): IPC Parameter Sending Thread for localhost/127.0.0.1:43361
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging threads (identical epoll wait stack): HMaster-EventLoopGroup-14-3, HMaster-EventLoopGroup-14-2, MiniHBaseClusterRegionServer-EventLoopGroup-15-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=509 (was 485) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=214 (was 246), ProcessCount=11 (was 11), AvailableMemoryMB=1445 (was 730) - AvailableMemoryMB LEAK? -
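The dump above is hbase.ResourceChecker output: it snapshots live threads (plus file descriptors, load average, process count and memory) before each test and reports whatever appeared during the test and is still alive afterwards, which is also where the "(was N)" deltas in the summary line come from. The six nioEventLoopGroup threads share a stack inside SingleThreadEventExecutor.confirmShutdown, i.e. Netty event loops sleeping out the quiet period of a graceful shutdown, so they normally exit on their own shortly after the check runs. A minimal sketch of the before/after diffing idea (plain JDK; an illustration, not the ResourceChecker source):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ThreadLeakSketch {
  public static void main(String[] args) throws Exception {
    // Snapshot the thread set before the test body runs.
    Set<Thread> before = new HashSet<>(Thread.getAllStackTraces().keySet());

    // ... test body would run here; simulate a thread it leaves behind ...
    Thread leaked = new Thread(() -> {
      try { Thread.sleep(60_000); } catch (InterruptedException ignored) {}
    }, "nioEventLoopGroup-demo-1"); // hypothetical name, echoing the dump above
    leaked.setDaemon(true);
    leaked.start();

    // After the test: anything alive that was not in the "before" set is suspect.
    for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
      if (e.getKey().isAlive() && !before.contains(e.getKey())) {
        System.out.println("Potentially hanging thread: " + e.getKey().getName());
        for (StackTraceElement frame : e.getValue()) {
          System.out.println("    " + frame);
        }
      }
    }
  }
}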
2024-11-20T20:40:42,904 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=234, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=214, ProcessCount=11, AvailableMemoryMB=1444
2024-11-20T20:40:42,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T20:40:42,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.log.dir so I do NOT create it in target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6
2024-11-20T20:40:42,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/53d824af-1c93-8707-2ad8-fb13f367c366/hadoop.tmp.dir so I do NOT create it in target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6
2024-11-20T20:40:42,904 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10, deleteOnExit=true
2024-11-20T20:40:42,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-20T20:40:42,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data, hadoop.tmp.dir, hadoop.log.dir, mapreduce.cluster.local.dir and mapreduce.cluster.temp.dir, each to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/<property-name> in system properties and HBase conf
2024-11-20T20:40:42,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-20T20:40:42,905 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-20T20:40:42,905-906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir, yarn.node-attribute.fs-store.root-dir, yarn.nodemanager.log-dirs, yarn.nodemanager.remote-app-log-dir (logged twice), yarn.timeline-service.entity-group-fs-store.active-dir, yarn.timeline-service.entity-group-fs-store.done-dir, dfs.journalnode.edits.dir (logged twice), dfs.datanode.shared.file.descriptor.paths, nfs.dump.dir, java.io.tmpdir, dfs.provided.aliasmap.inmemory.leveldb.dir and fs.s3a.committer.staging.tmp.path, likewise each to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/<property-name> in system properties and HBase conf
2024-11-20T20:40:42,932 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:40:43,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
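For reference, the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...} logged at 20:40:42,904 corresponds to a harness call roughly like the sketch below. The builder names mirror the HBase 3.x testing API that the log itself names (HBaseTestingUtil, StartMiniClusterOption); treat the exact calls as an approximation, not this test's actual source:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Same shape as the logged option: 1 master, 1 region server,
    // 2 datanodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();
    util.startMiniCluster(option); // brings up DFS, ZK, master, region server
    try {
      // ... exercise the cluster here ...
    } finally {
      util.shutdownMiniCluster(); // tears everything down; test dirs are deleteOnExit
    }
  }
}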
2024-11-20T20:40:43,262 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:40:43,265 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:40:43,266 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:40:43,266 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:40:43,266 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:40:43,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:40:43,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64047f5d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:40:43,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72a3013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:40:43,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4be2fc80{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/java.io.tmpdir/jetty-localhost-38265-hadoop-hdfs-3_4_1-tests_jar-_-any-11260425471628119003/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:40:43,358 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6833f9d6{HTTP/1.1, (http/1.1)}{localhost:38265}
2024-11-20T20:40:43,358 INFO [Time-limited test {}] server.Server(415): Started @317065ms
2024-11-20T20:40:43,368 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-20T20:40:43,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:43,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:43,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:43,616 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
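The recurring "InvocationTargetException: null" entries above all come from the same place: RecoverLeaseFSUtils probes DistributedFileSystem#isFileClosed reflectively (the method is not on the generic FileSystem interface, per the RecoverLeaseFSUtils.isFileClosed frame), and reflection wraps whatever the target method throws, here java.io.IOException: Filesystem closed, in an InvocationTargetException whose own message is null. A minimal sketch of that probe pattern (an illustration of the mechanism, not the HBase source):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /** Returns true if the filesystem exposes isFileClosed and reports the file closed. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem but not on the
      // generic FileSystem API, hence the reflective lookup.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // The real failure ("java.io.IOException: Filesystem closed" above)
      // is the wrapped cause, not the InvocationTargetException itself,
      // which is why the log's outer exception message is "null".
      System.err.println("Failed invocation: " + e.getCause());
      return false;
    } catch (ReflectiveOperationException e) {
      return false; // method not available on this FileSystem implementation
    }
  }
}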
2024-11-20T20:40:43,618 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:40:43,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:40:43,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:40:43,619 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T20:40:43,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18d79289{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:40:43,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27b8ac4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:40:43,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18421c56{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/java.io.tmpdir/jetty-localhost-33633-hadoop-hdfs-3_4_1-tests_jar-_-any-5389410752629748074/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:40:43,714 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6adc48be{HTTP/1.1, (http/1.1)}{localhost:33633}
2024-11-20T20:40:43,714 INFO [Time-limited test {}] server.Server(415): Started @317421ms
2024-11-20T20:40:43,715 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:40:43,739 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T20:40:43,742 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T20:40:43,742 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T20:40:43,742 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T20:40:43,743 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T20:40:43,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e345901{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/hadoop.log.dir/,AVAILABLE}
2024-11-20T20:40:43,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ea3f07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T20:40:43,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f1bdece{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/java.io.tmpdir/jetty-localhost-45245-hadoop-hdfs-3_4_1-tests_jar-_-any-13240471713676948640/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:40:43,836 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d05b3cb{HTTP/1.1, (http/1.1)}{localhost:45245}
2024-11-20T20:40:43,836 INFO [Time-limited test {}] server.Server(415): Started @317543ms
2024-11-20T20:40:43,837 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T20:40:44,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
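The Server/ServletContextHandler/ServerConnector triples above are the standard embedded-Jetty startup sequence, repeated once per web UI (namenode on 38265, datanodes on 33633 and 45245). A generic Jetty 9 sketch that produces the same kind of lines (illustration only, not the Hadoop HttpServer2 wiring these tests actually use):

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;

public class JettySketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server();
    ServerConnector connector = new ServerConnector(server);
    connector.setHost("localhost");
    connector.setPort(0); // 0 = pick a free port, like the 38265/33633/45245 above
    server.addConnector(connector);

    ServletContextHandler context = new ServletContextHandler();
    context.setContextPath("/logs"); // e.g. the {logs,/logs,...} handler above
    server.setHandler(context);

    server.start(); // logs "Started ServerConnector@...{HTTP/1.1}{localhost:PORT}"
    server.join();
  }
}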
2024-11-20T20:40:44,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:44,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:44,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:45,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:45,240 WARN [Thread-2504 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/data/data2/current/BP-1886452336-172.17.0.2-1732135242935/current, will proceed with Du for space computation calculation,
2024-11-20T20:40:45,240 WARN [Thread-2503 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/data/data1/current/BP-1886452336-172.17.0.2-1732135242935/current, will proceed with Du for space computation calculation,
2024-11-20T20:40:45,258 WARN [Thread-2467 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:40:45,260 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29abd86a4e66583f with lease ID 0x34c9fba4c8725452: Processing first storage report for DS-ff40464c-69db-4ff1-bc0f-d4802e63c507 from datanode DatanodeRegistration(127.0.0.1:42929, datanodeUuid=79a3e18a-f1ff-44b3-9c2b-c6ef1d9ba857, infoPort=38845, infoSecurePort=0, ipcPort=43763, storageInfo=lv=-57;cid=testClusterID;nsid=841416275;c=1732135242935)
2024-11-20T20:40:45,260 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x29abd86a4e66583f with lease ID 0x34c9fba4c8725452: from storage DS-ff40464c-69db-4ff1-bc0f-d4802e63c507 node DatanodeRegistration(127.0.0.1:42929, datanodeUuid=79a3e18a-f1ff-44b3-9c2b-c6ef1d9ba857, infoPort=38845, infoSecurePort=0, ipcPort=43763, storageInfo=lv=-57;cid=testClusterID;nsid=841416275;c=1732135242935), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:40:45,260 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29abd86a4e66583f with lease ID 0x34c9fba4c8725452: Processing first storage report for DS-bf764219-5de9-4d68-87eb-286b7a8810df from datanode DatanodeRegistration(127.0.0.1:42929, datanodeUuid=79a3e18a-f1ff-44b3-9c2b-c6ef1d9ba857, infoPort=38845, infoSecurePort=0, ipcPort=43763, storageInfo=lv=-57;cid=testClusterID;nsid=841416275;c=1732135242935)
2024-11-20T20:40:45,260 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x29abd86a4e66583f with lease ID 0x34c9fba4c8725452: from storage DS-bf764219-5de9-4d68-87eb-286b7a8810df node DatanodeRegistration(127.0.0.1:42929, datanodeUuid=79a3e18a-f1ff-44b3-9c2b-c6ef1d9ba857, infoPort=38845, infoSecurePort=0, ipcPort=43763, storageInfo=lv=-57;cid=testClusterID;nsid=841416275;c=1732135242935), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:40:45,375 WARN [Thread-2514 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/data/data3/current/BP-1886452336-172.17.0.2-1732135242935/current, will proceed with Du for space computation calculation,
2024-11-20T20:40:45,376 WARN [Thread-2515 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/data/data4/current/BP-1886452336-172.17.0.2-1732135242935/current, will proceed with Du for space computation calculation,
2024-11-20T20:40:45,402 WARN [Thread-2490 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T20:40:45,404 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d039d64a45829c9 with lease ID 0x34c9fba4c8725453: Processing first storage report for DS-d5db81cd-3ee2-4cd5-a6e4-b247cadb731f from datanode DatanodeRegistration(127.0.0.1:42573, datanodeUuid=01d31dbc-f5b6-43ce-8e41-527302f4c493, infoPort=34467, infoSecurePort=0, ipcPort=38417, storageInfo=lv=-57;cid=testClusterID;nsid=841416275;c=1732135242935)
2024-11-20T20:40:45,404 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d039d64a45829c9 with lease ID 0x34c9fba4c8725453: from storage DS-d5db81cd-3ee2-4cd5-a6e4-b247cadb731f node DatanodeRegistration(127.0.0.1:42573, datanodeUuid=01d31dbc-f5b6-43ce-8e41-527302f4c493, infoPort=34467, infoSecurePort=0, ipcPort=38417, storageInfo=lv=-57;cid=testClusterID;nsid=841416275;c=1732135242935), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:40:45,404 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d039d64a45829c9 with lease ID 0x34c9fba4c8725453: Processing first storage report for DS-c0b10606-7c4f-40a9-8418-2a880b2a0599 from datanode DatanodeRegistration(127.0.0.1:42573, datanodeUuid=01d31dbc-f5b6-43ce-8e41-527302f4c493, infoPort=34467, infoSecurePort=0, ipcPort=38417, storageInfo=lv=-57;cid=testClusterID;nsid=841416275;c=1732135242935)
2024-11-20T20:40:45,404 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d039d64a45829c9 with lease ID 0x34c9fba4c8725453: from storage DS-c0b10606-7c4f-40a9-8418-2a880b2a0599 node DatanodeRegistration(127.0.0.1:42573, datanodeUuid=01d31dbc-f5b6-43ce-8e41-527302f4c493, infoPort=34467, infoSecurePort=0, ipcPort=38417, storageInfo=lv=-57;cid=testClusterID;nsid=841416275;c=1732135242935), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T20:40:45,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:45,471 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6
2024-11-20T20:40:45,475 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/zookeeper_0, clientPort=61719, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-20T20:40:45,476 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61719
2024-11-20T20:40:45,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:40:45,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
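MiniZooKeeperCluster above exposes a plain ZooKeeper ensemble on clientPort=61719, and the ZKUtil entries that follow repeatedly log "Set watcher on znode that does not yet exist": exists() registers a watch even when the znode is absent, so the client receives a NodeCreated event once the node appears. A sketch of that pattern against the logged client port (raw ZooKeeper API, for illustration only):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class WatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61719", 30000,
        (WatchedEvent event) ->
            // Prints lines of the same shape as the ZKWatcher entries below,
            // e.g. "Received ZooKeeper Event, type=NodeCreated, path=/hbase/master"
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath()));
    // Returns null if /hbase/master does not exist yet, but the watch is
    // still registered and fires when the znode is later created.
    System.out.println("stat: " + zk.exists("/hbase/master", true));
    Thread.sleep(Long.MAX_VALUE); // keep the session alive to receive events
  }
}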
2024-11-20T20:40:45,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:40:45,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741825_1001 (size=7)
2024-11-20T20:40:45,490 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86 with version=8
2024-11-20T20:40:45,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42437/user/jenkins/test-data/2f2c06d1-0cd0-ef4c-3fe9-09665556de18/hbase-staging
2024-11-20T20:40:45,493 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:40:45,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:40:45,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:40:45,493 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:40:45,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:40:45,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T20:40:45,493 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-20T20:40:45,493 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T20:40:45,494 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44819
2024-11-20T20:40:45,495 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44819 connecting to ZooKeeper ensemble=127.0.0.1:61719
2024-11-20T20:40:45,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:448190x0, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T20:40:45,553 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44819-0x10159fa524d0000 connected
2024-11-20T20:40:45,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
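The ipc.RpcExecutor entries above describe bounded call queues drained by fixed handler pools (e.g. default.FPBQ.Fifo: one LinkedBlockingQueue, maxQueueLength=30, handlerCount=3). A generic sketch of that queue-plus-handlers shape (plain java.util.concurrent; an illustration of the pattern, not HBase's RpcExecutor source):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class HandlerPoolSketch {
  public static void main(String[] args) throws Exception {
    int handlerCount = 3, maxQueueLength = 30; // values from the log above
    BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(maxQueueLength);

    for (int i = 0; i < handlerCount; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (true) {
            callQueue.take().run(); // block until a call arrives, then dispatch it
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // handler shuts down on interrupt
        }
      }, "default.FPBQ.Fifo.handler-" + i); // hypothetical thread naming
      handler.setDaemon(true);
      handler.start();
    }

    // offer() rather than put(): a full queue rejects the call instead of
    // blocking the reader thread, which is the point of a bounded call queue.
    boolean accepted = callQueue.offer(() -> System.out.println("handled call"));
    System.out.println("call accepted: " + accepted);
    Thread.sleep(200); // give the daemon handlers a moment before the JVM exits
  }
}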
2024-11-20T20:40:45,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null (stack identical to the 20:40:43,081 occurrence above; Caused by: java.io.IOException: Filesystem closed)
2024-11-20T20:40:45,638 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:40:45,641 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T20:40:45,644 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T20:40:45,644 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86, hbase.cluster.distributed=false
2024-11-20T20:40:45,647 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T20:40:45,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44819
2024-11-20T20:40:45,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44819
2024-11-20T20:40:45,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44819
2024-11-20T20:40:45,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44819
2024-11-20T20:40:45,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44819
2024-11-20T20:40:45,667 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d7d59561a64e:0 server-side Connection retries=45
2024-11-20T20:40:45,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:40:45,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T20:40:45,667 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T20:40:45,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T20:40:45,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T20:40:45,667 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T20:40:45,667 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T20:40:45,668 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33463 2024-11-20T20:40:45,668 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33463 connecting to ZooKeeper ensemble=127.0.0.1:61719 2024-11-20T20:40:45,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:40:45,670 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:40:45,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334630x0, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T20:40:45,681 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33463-0x10159fa524d0001 connected 2024-11-20T20:40:45,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:40:45,681 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T20:40:45,682 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T20:40:45,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T20:40:45,684 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T20:40:45,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33463 2024-11-20T20:40:45,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33463 2024-11-20T20:40:45,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33463 2024-11-20T20:40:45,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33463 2024-11-20T20:40:45,686 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33463 2024-11-20T20:40:45,698 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d7d59561a64e:44819 2024-11-20T20:40:45,698 INFO 
[master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d7d59561a64e,44819,1732135245493 2024-11-20T20:40:45,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:40:45,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:40:45,708 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d7d59561a64e,44819,1732135245493 2024-11-20T20:40:45,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T20:40:45,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,718 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T20:40:45,719 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d7d59561a64e,44819,1732135245493 from backup master directory 2024-11-20T20:40:45,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:40:45,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d7d59561a64e,44819,1732135245493 2024-11-20T20:40:45,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T20:40:45,728 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T20:40:45,729 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d7d59561a64e,44819,1732135245493 2024-11-20T20:40:45,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,736 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/hbase.id] with ID: 7cd01441-2baf-49c0-b153-6b04a3f351e1 2024-11-20T20:40:45,736 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/.tmp/hbase.id 2024-11-20T20:40:45,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741826_1002 (size=42) 2024-11-20T20:40:45,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741826_1002 (size=42) 2024-11-20T20:40:45,744 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/.tmp/hbase.id]:[hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/hbase.id] 2024-11-20T20:40:45,754 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:40:45,754 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 
2024-11-20T20:40:45,755 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-20T20:40:45,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T20:40:45,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741827_1003 (size=196) 2024-11-20T20:40:45,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741827_1003 (size=196) 2024-11-20T20:40:45,770 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T20:40:45,771 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T20:40:45,771 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:40:45,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741828_1004 (size=1189) 2024-11-20T20:40:45,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741828_1004 (size=1189) 2024-11-20T20:40:45,778 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store 2024-11-20T20:40:45,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741829_1005 (size=34) 2024-11-20T20:40:45,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741829_1005 (size=34) 2024-11-20T20:40:45,783 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:40:45,783 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T20:40:45,783 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:45,783 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:45,783 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T20:40:45,783 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:45,783 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T20:40:45,783 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135245783Disabling compacts and flushes for region at 1732135245783Disabling writes for close at 1732135245783Writing region close event to WAL at 1732135245783Closed at 1732135245783 2024-11-20T20:40:45,784 WARN [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/.initializing 2024-11-20T20:40:45,784 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/WALs/d7d59561a64e,44819,1732135245493 2024-11-20T20:40:45,786 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C44819%2C1732135245493, suffix=, logDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/WALs/d7d59561a64e,44819,1732135245493, archiveDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/oldWALs, maxLogs=10 2024-11-20T20:40:45,787 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C44819%2C1732135245493.1732135245786 2024-11-20T20:40:45,791 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/WALs/d7d59561a64e,44819,1732135245493/d7d59561a64e%2C44819%2C1732135245493.1732135245786 2024-11-20T20:40:45,792 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38845:38845),(127.0.0.1/127.0.0.1:34467:34467)] 2024-11-20T20:40:45,795 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T20:40:45,795 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T20:40:45,795 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,795 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T20:40:45,797 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:40:45,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T20:40:45,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T20:40:45,798 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:40:45,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:40:45,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T20:40:45,800 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:40:45,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:40:45,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T20:40:45,801 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:40:45,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T20:40:45,802 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,802 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,802 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,803 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,803 DEBUG [master/d7d59561a64e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,804 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T20:40:45,805 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T20:40:45,806 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T20:40:45,807 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723471, jitterRate=-0.08005905151367188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T20:40:45,807 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732135245795Initializing all the Stores at 1732135245796 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135245796Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135245796Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135245796Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135245796Cleaning up temporary data from old regions at 1732135245803 (+7 ms)Region opened successfully at 1732135245807 (+4 ms) 2024-11-20T20:40:45,807 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T20:40:45,810 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@300a8cb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0 2024-11-20T20:40:45,811 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T20:40:45,811 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T20:40:45,811 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T20:40:45,811 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T20:40:45,812 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T20:40:45,812 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T20:40:45,812 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T20:40:45,814 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T20:40:45,815 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T20:40:45,827 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T20:40:45,828 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T20:40:45,828 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T20:40:45,838 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T20:40:45,838 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T20:40:45,839 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T20:40:45,848 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T20:40:45,849 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T20:40:45,859 DEBUG 
[master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T20:40:45,861 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T20:40:45,869 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T20:40:45,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:40:45,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T20:40:45,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,880 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d7d59561a64e,44819,1732135245493, sessionid=0x10159fa524d0000, setting cluster-up flag (Was=false) 2024-11-20T20:40:45,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,933 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T20:40:45,934 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,44819,1732135245493 2024-11-20T20:40:45,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:45,985 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T20:40:45,987 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d7d59561a64e,44819,1732135245493 2024-11-20T20:40:45,988 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T20:40:45,989 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T20:40:45,989 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T20:40:45,989 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T20:40:45,989 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d7d59561a64e,44819,1732135245493 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T20:40:45,991 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:40:45,991 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:40:45,991 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:40:45,991 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d7d59561a64e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T20:40:45,991 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d7d59561a64e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T20:40:45,991 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T20:40:45,991 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T20:40:45,991 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d7d59561a64e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T20:40:45,992 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732135275992 2024-11-20T20:40:45,992 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T20:40:45,992 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T20:40:45,992 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T20:40:45,992 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T20:40:45,992 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T20:40:45,992 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T20:40:45,992 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:40:45,993 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T20:40:45,993 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T20:40:45,993 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T20:40:45,993 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T20:40:45,993 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T20:40:45,993 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T20:40:45,993 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T20:40:45,993 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135245993,5,FailOnTimeoutGroup] 2024-11-20T20:40:45,994 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135245993,5,FailOnTimeoutGroup] 2024-11-20T20:40:45,994 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T20:40:45,994 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T20:40:45,994 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T20:40:45,994 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T20:40:45,994 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T20:40:45,994 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T20:40:45,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:40:45,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741831_1007 (size=1321) 2024-11-20T20:40:46,000 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T20:40:46,000 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86
2024-11-20T20:40:46,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741832_1008 (size=32)
2024-11-20T20:40:46,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741832_1008 (size=32)
2024-11-20T20:40:46,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:40:46,010 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-20T20:40:46,011 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-20T20:40:46,011 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:46,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:40:46,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-20T20:40:46,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-20T20:40:46,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:46,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:40:46,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-20T20:40:46,014 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-20T20:40:46,014 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:46,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:40:46,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-20T20:40:46,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-20T20:40:46,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:46,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:40:46,016 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-20T20:40:46,016 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740
2024-11-20T20:40:46,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740
2024-11-20T20:40:46,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-20T20:40:46,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-20T20:40:46,018 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-20T20:40:46,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-20T20:40:46,021 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T20:40:46,022 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807720, jitterRate=0.027069956064224243}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-20T20:40:46,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732135246009Initializing all the Stores at 1732135246010 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135246010Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135246010Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135246010Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135246010Cleaning up temporary data from old regions at 1732135246018 (+8 ms)Region opened successfully at 1732135246022 (+4 ms)
2024-11-20T20:40:46,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-20T20:40:46,022 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-20T20:40:46,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-20T20:40:46,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-20T20:40:46,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-20T20:40:46,023 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-20T20:40:46,023 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135246022Disabling compacts and flushes for region at 1732135246022Disabling writes for close at 1732135246022Writing region close event to WAL at 1732135246023 (+1 ms)Closed at 1732135246023
2024-11-20T20:40:46,024 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-20T20:40:46,024 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-20T20:40:46,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-20T20:40:46,025 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-20T20:40:46,026 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-20T20:40:46,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-20T20:40:46,088 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(746): ClusterId : 7cd01441-2baf-49c0-b153-6b04a3f351e1
2024-11-20T20:40:46,088 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-20T20:40:46,103 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-20T20:40:46,103 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-20T20:40:46,113 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-20T20:40:46,114 DEBUG [RS:0;d7d59561a64e:33463 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4096084c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d7d59561a64e/172.17.0.2:0
2024-11-20T20:40:46,130 DEBUG [RS:0;d7d59561a64e:33463 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d7d59561a64e:33463
2024-11-20T20:40:46,130 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-20T20:40:46,130 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-20T20:40:46,130 DEBUG [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-20T20:40:46,131 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(2659): reportForDuty to master=d7d59561a64e,44819,1732135245493 with port=33463, startcode=1732135245666
2024-11-20T20:40:46,131 DEBUG [RS:0;d7d59561a64e:33463 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-20T20:40:46,133 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45915, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService
2024-11-20T20:40:46,133 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44819 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,133 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44819 {}] master.ServerManager(517): Registering regionserver=d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,134 DEBUG [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86
2024-11-20T20:40:46,135 DEBUG [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40503
2024-11-20T20:40:46,135 DEBUG [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-20T20:40:46,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T20:40:46,144 DEBUG [RS:0;d7d59561a64e:33463 {}] zookeeper.ZKUtil(111): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,144 WARN [RS:0;d7d59561a64e:33463 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T20:40:46,144 INFO [RS:0;d7d59561a64e:33463 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:40:46,144 DEBUG [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,144 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d7d59561a64e,33463,1732135245666]
2024-11-20T20:40:46,146 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-20T20:40:46,147 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-20T20:40:46,148 INFO [RS:0;d7d59561a64e:33463 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-20T20:40:46,148 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,148 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-20T20:40:46,149 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-20T20:40:46,149 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,149 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,149 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,149 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,149 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d7d59561a64e:0, corePoolSize=2, maxPoolSize=2
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d7d59561a64e:0, corePoolSize=1, maxPoolSize=1
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3
2024-11-20T20:40:46,150 DEBUG [RS:0;d7d59561a64e:33463 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d7d59561a64e:0, corePoolSize=3, maxPoolSize=3
2024-11-20T20:40:46,150 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,151 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,151 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,151 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,151 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,151 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,33463,1732135245666-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T20:40:46,166 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-20T20:40:46,166 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,33463,1732135245666-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,167 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,167 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.Replication(171): d7d59561a64e,33463,1732135245666 started
2024-11-20T20:40:46,176 WARN [d7d59561a64e:44819 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-20T20:40:46,178 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,178 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1482): Serving as d7d59561a64e,33463,1732135245666, RpcServer on d7d59561a64e/172.17.0.2:33463, sessionid=0x10159fa524d0001
2024-11-20T20:40:46,178 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-20T20:40:46,178 DEBUG [RS:0;d7d59561a64e:33463 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,178 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,33463,1732135245666'
2024-11-20T20:40:46,178 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-20T20:40:46,178 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-20T20:40:46,179 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-20T20:40:46,179 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-20T20:40:46,179 DEBUG [RS:0;d7d59561a64e:33463 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,179 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd7d59561a64e,33463,1732135245666'
2024-11-20T20:40:46,179 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-20T20:40:46,179 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-20T20:40:46,180 DEBUG [RS:0;d7d59561a64e:33463 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-20T20:40:46,180 INFO [RS:0;d7d59561a64e:33463 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-20T20:40:46,180 INFO [RS:0;d7d59561a64e:33463 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-20T20:40:46,270 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T20:40:46,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,283 INFO [RS:0;d7d59561a64e:33463 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C33463%2C1732135245666, suffix=, logDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/d7d59561a64e,33463,1732135245666, archiveDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/oldWALs, maxLogs=32
2024-11-20T20:40:46,284 INFO [RS:0;d7d59561a64e:33463 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C33463%2C1732135245666.1732135246283
2024-11-20T20:40:46,288 INFO [RS:0;d7d59561a64e:33463 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/d7d59561a64e,33463,1732135245666/d7d59561a64e%2C33463%2C1732135245666.1732135246283
2024-11-20T20:40:46,291 DEBUG [RS:0;d7d59561a64e:33463 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38845:38845),(127.0.0.1/127.0.0.1:34467:34467)]
2024-11-20T20:40:46,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T20:40:46,426 DEBUG [d7d59561a64e:44819 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-20T20:40:46,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-20T20:40:46,427 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,429 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,33463,1732135245666, state=OPENING
2024-11-20T20:40:46,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T20:40:46,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-20T20:40:46,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-11-20T20:40:46,522 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-20T20:40:46,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:40:46,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T20:40:46,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:40:46,535 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-20T20:40:46,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:40:46,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,33463,1732135245666}]
2024-11-20T20:40:46,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,35431,1732135043885/d7d59561a64e%2C35431%2C1732135043885.1732135044133
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-20T20:40:46,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.meta.1732135043724.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-20T20:40:46,691 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-20T20:40:46,693 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45441, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-20T20:40:46,697 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-20T20:40:46,697 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-20T20:40:46,699 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d7d59561a64e%2C33463%2C1732135245666.meta, suffix=.meta, logDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/d7d59561a64e,33463,1732135245666, archiveDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/oldWALs, maxLogs=32
2024-11-20T20:40:46,700 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d7d59561a64e%2C33463%2C1732135245666.meta.1732135246699.meta
2024-11-20T20:40:46,706 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/d7d59561a64e,33463,1732135245666/d7d59561a64e%2C33463%2C1732135245666.meta.1732135246699.meta
2024-11-20T20:40:46,707 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34467:34467),(127.0.0.1/127.0.0.1:38845:38845)]
2024-11-20T20:40:46,707 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-20T20:40:46,708 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-20T20:40:46,708 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-20T20:40:46,708 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-20T20:40:46,708 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-20T20:40:46,708 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T20:40:46,708 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-20T20:40:46,708 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-20T20:40:46,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-20T20:40:46,710 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-20T20:40:46,710 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:46,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:40:46,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-20T20:40:46,711 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-20T20:40:46,711 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:46,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:40:46,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-20T20:40:46,712 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-20T20:40:46,712 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:46,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:40:46,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-20T20:40:46,713 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-20T20:40:46,713 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T20:40:46,714 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-20T20:40:46,714 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-20T20:40:46,714 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740
2024-11-20T20:40:46,715 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740
2024-11-20T20:40:46,717 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-20T20:40:46,717 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-20T20:40:46,717 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-20T20:40:46,718 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-20T20:40:46,719 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798684, jitterRate=0.015579774975776672}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-20T20:40:46,719 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-20T20:40:46,719 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732135246708Writing region info on filesystem at 1732135246708Initializing all the Stores at 1732135246709 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135246709Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135246709Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732135246709Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732135246709Cleaning up temporary data from old regions at 1732135246717 (+8 ms)Running coprocessor post-open hooks at 1732135246719 (+2 ms)Region opened successfully at 1732135246719
2024-11-20T20:40:46,720 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732135246691
2024-11-20T20:40:46,722 DEBUG [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-20T20:40:46,722 INFO [RS_OPEN_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-20T20:40:46,723 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,723 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d7d59561a64e,33463,1732135245666, state=OPEN
2024-11-20T20:40:46,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-20T20:40:46,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-20T20:40:46,764 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d7d59561a64e,33463,1732135245666
2024-11-20T20:40:46,764 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:40:46,764 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-20T20:40:46,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-20T20:40:46,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d7d59561a64e,33463,1732135245666 in 229 msec
2024-11-20T20:40:46,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-20T20:40:46,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 745 msec
2024-11-20T20:40:46,776 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-20T20:40:46,776 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-20T20:40:46,777 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-20T20:40:46,777 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,33463,1732135245666, seqNum=-1]
2024-11-20T20:40:46,778 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-20T20:40:46,779 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60307, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-20T20:40:46,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 794 msec
2024-11-20T20:40:46,783 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732135246783, completionTime=-1
2024-11-20T20:40:46,783 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-20T20:40:46,783 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster...
2024-11-20T20:40:46,785 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1
2024-11-20T20:40:46,785 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732135306785
2024-11-20T20:40:46,785 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732135366785
2024-11-20T20:40:46,785 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec
2024-11-20T20:40:46,785 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,44819,1732135245493-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,785 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,44819,1732135245493-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,785 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,44819,1732135245493-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,785 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d7d59561a64e:44819, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T20:40:46,786 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T20:40:46,786 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T20:40:46,787 DEBUG [master/d7d59561a64e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T20:40:46,789 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.060sec 2024-11-20T20:40:46,789 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T20:40:46,789 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T20:40:46,789 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T20:40:46,789 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T20:40:46,789 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T20:40:46,789 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,44819,1732135245493-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T20:40:46,789 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,44819,1732135245493-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T20:40:46,791 DEBUG [master/d7d59561a64e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T20:40:46,791 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T20:40:46,791 INFO [master/d7d59561a64e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d7d59561a64e,44819,1732135245493-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
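The run of "Chore ScheduledChore name=... is enabled." entries above is the master registering its periodic maintenance tasks (balancer, catalog janitor, MOB cleaners, and so on) with a ChoreService. As a hedged illustration of that public pattern, a minimal chore looks like the sketch below; the chore name and period are made up for the example, not taken from this log.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Minimal sketch of the ScheduledChore/ChoreService pattern behind the
// "Chore ScheduledChore name=... is enabled." entries above.
public class ExampleChore extends ScheduledChore {

  ExampleChore(Stoppable stopper) {
    // name, stopper, period in milliseconds (illustrative values)
    super("ExampleChore", stopper, 60_000);
  }

  @Override
  protected void chore() {
    // periodic work goes here
    System.out.println("chore fired");
  }

  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(new ExampleChore(stopper)); // logs "... is enabled."
    Thread.sleep(5_000);
    service.shutdown();
  }
}
```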
2024-11-20T20:40:46,889 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54fa2501, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:40:46,889 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d7d59561a64e,44819,-1 for getting cluster id 2024-11-20T20:40:46,889 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T20:40:46,892 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7cd01441-2baf-49c0-b153-6b04a3f351e1' 2024-11-20T20:40:46,892 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T20:40:46,892 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7cd01441-2baf-49c0-b153-6b04a3f351e1" 2024-11-20T20:40:46,893 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e7671da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:40:46,893 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d7d59561a64e,44819,-1] 2024-11-20T20:40:46,893 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T20:40:46,894 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:40:46,895 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40278, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T20:40:46,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4125913c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T20:40:46,898 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T20:40:46,899 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d7d59561a64e,33463,1732135245666, seqNum=-1] 2024-11-20T20:40:46,899 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T20:40:46,900 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40368, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T20:40:46,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d7d59561a64e,44819,1732135245493 2024-11-20T20:40:46,902 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T20:40:46,905 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T20:40:46,905 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T20:40:46,907 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/test.com,8080,1, archiveDir=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/oldWALs, maxLogs=32 2024-11-20T20:40:46,907 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732135246907 2024-11-20T20:40:46,913 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/test.com,8080,1/test.com%2C8080%2C1.1732135246907 2024-11-20T20:40:46,917 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38845:38845),(127.0.0.1/127.0.0.1:34467:34467)] 2024-11-20T20:40:46,918 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732135246917 2024-11-20T20:40:46,922 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,922 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,922 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,923 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/test.com,8080,1/test.com%2C8080%2C1.1732135246907 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/test.com,8080,1/test.com%2C8080%2C1.1732135246917 2024-11-20T20:40:46,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741835_1011 (size=93) 2024-11-20T20:40:46,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741835_1011 (size=93) 2024-11-20T20:40:46,928 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34467:34467),(127.0.0.1/127.0.0.1:38845:38845)] 2024-11-20T20:40:46,928 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/WALs/test.com,8080,1/test.com%2C8080%2C1.1732135246907 to hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/oldWALs/test.com%2C8080%2C1.1732135246907 2024-11-20T20:40:46,929 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,929 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,929 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,929 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,929 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:46,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42929 is added to blk_1073741836_1012 (size=93) 2024-11-20T20:40:46,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741836_1012 (size=93) 2024-11-20T20:40:46,932 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/oldWALs 2024-11-20T20:40:46,932 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732135246917) 2024-11-20T20:40:46,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T20:40:46,933 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T20:40:46,933 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:40:46,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:40:46,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:40:46,933 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T20:40:46,933 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T20:40:46,933 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=882885887, stopped=false 2024-11-20T20:40:46,933 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d7d59561a64e,44819,1732135245493 2024-11-20T20:40:46,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T20:40:46,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T20:40:46,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:46,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:46,954 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T20:40:46,954 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T20:40:46,954 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:40:46,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:40:46,954 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:40:46,954 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T20:40:46,954 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd7d59561a64e,33463,1732135245666' ***** 2024-11-20T20:40:46,954 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T20:40:46,955 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(959): stopping server d7d59561a64e,33463,1732135245666 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d7d59561a64e:33463. 2024-11-20T20:40:46,955 DEBUG [RS:0;d7d59561a64e:33463 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T20:40:46,955 DEBUG [RS:0;d7d59561a64e:33463 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T20:40:46,955 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-20T20:40:46,955 DEBUG [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T20:40:46,955 DEBUG [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T20:40:46,955 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T20:40:46,955 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T20:40:46,956 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T20:40:46,956 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T20:40:46,956 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T20:40:46,956 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-20T20:40:46,970 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740/.tmp/ns/92c5b483d0ba4156bdbbc30a4bf9b85a is 43, key is default/ns:d/1732135246779/Put/seqid=0 2024-11-20T20:40:46,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741837_1013 (size=5153) 2024-11-20T20:40:46,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741837_1013 (size=5153) 2024-11-20T20:40:46,974 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740/.tmp/ns/92c5b483d0ba4156bdbbc30a4bf9b85a 2024-11-20T20:40:46,979 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740/.tmp/ns/92c5b483d0ba4156bdbbc30a4bf9b85a as hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740/ns/92c5b483d0ba4156bdbbc30a4bf9b85a 2024-11-20T20:40:46,984 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740/ns/92c5b483d0ba4156bdbbc30a4bf9b85a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-20T20:40:46,985 INFO 
[RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-11-20T20:40:46,985 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T20:40:46,989 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T20:40:46,990 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T20:40:46,990 INFO [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T20:40:46,990 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732135246955Running coprocessor pre-close hooks at 1732135246955Disabling compacts and flushes for region at 1732135246955Disabling writes for close at 1732135246956 (+1 ms)Obtaining lock to block concurrent updates at 1732135246956Preparing flush snapshotting stores in 1588230740 at 1732135246956Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732135246956Flushing stores of hbase:meta,,1.1588230740 at 1732135246956Flushing 1588230740/ns: creating writer at 1732135246957 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732135246969 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1732135246969Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32b04781: reopening flushed file at 1732135246978 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1732135246985 (+7 ms)Writing region close event to WAL at 1732135246986 (+1 ms)Running coprocessor post-close hooks at 1732135246990 (+4 ms)Closed at 1732135246990 2024-11-20T20:40:46,990 DEBUG [RS_CLOSE_META-regionserver/d7d59561a64e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T20:40:47,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/MasterData/WALs/d7d59561a64e,38137,1732135042252/d7d59561a64e%2C38137%2C1732135042252.1732135042557 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T20:40:47,151 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T20:40:47,151 INFO [regionserver/d7d59561a64e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T20:40:47,156 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(976): stopping server d7d59561a64e,33463,1732135245666; all regions closed. 
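The WARN above shows RecoverLeaseFSUtils failing its isFileClosed probe with "Filesystem closed": the DFSClient behind the FileSystem had already been shut down while the close-WAL worker was still retrying. For context, the recovery loop that utility wraps boils down to the public DistributedFileSystem calls sketched below; this is a hedged approximation (path handling and poll interval are illustrative), not the HBase implementation itself.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hedged sketch of HDFS lease recovery: ask the NameNode to recover the
// lease, then poll isFileClosed() until the file is reported closed.
public class LeaseRecoveryExample {
  static void recover(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered && !dfs.isFileClosed(wal)) {
      Thread.sleep(1_000);               // back off between polls
      recovered = dfs.recoverLease(wal); // re-issue the recovery request
    }
    // If the filesystem is closed mid-loop, these calls throw IOException,
    // which is the failure mode logged above.
  }
}
```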
2024-11-20T20:40:47,157 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,157 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,157 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,158 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,158 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741834_1010 (size=1152) 2024-11-20T20:40:47,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741834_1010 (size=1152) 2024-11-20T20:40:47,168 DEBUG [RS:0;d7d59561a64e:33463 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/oldWALs 2024-11-20T20:40:47,168 INFO [RS:0;d7d59561a64e:33463 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C33463%2C1732135245666.meta:.meta(num 1732135246699) 2024-11-20T20:40:47,168 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,169 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,169 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,169 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,169 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741833_1009 (size=93) 2024-11-20T20:40:47,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741833_1009 (size=93) 2024-11-20T20:40:47,174 DEBUG [RS:0;d7d59561a64e:33463 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/oldWALs 2024-11-20T20:40:47,174 INFO [RS:0;d7d59561a64e:33463 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d7d59561a64e%2C33463%2C1732135245666:(num 1732135246283) 2024-11-20T20:40:47,174 DEBUG [RS:0;d7d59561a64e:33463 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T20:40:47,174 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T20:40:47,175 INFO [RS:0;d7d59561a64e:33463 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:40:47,175 INFO [RS:0;d7d59561a64e:33463 {}] hbase.ChoreService(370): Chore service for: regionserver/d7d59561a64e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T20:40:47,175 INFO [RS:0;d7d59561a64e:33463 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:40:47,175 INFO [regionserver/d7d59561a64e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
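Earlier in the test ("Rolled WAL ... with entries=0, filesize=85 B") a writer was rolled with nothing written, and the now-empty file was archived, matching the "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL" entries just above. Outside the test harness, the same roll can be requested through the public Admin API; the sketch below is a hedged illustration, with connection setup left generic.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: ask each region server to roll its WAL writer. A WAL with
// no entries is simply closed and moved to the oldWALs archive directory.
public class RollWalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}
```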
2024-11-20T20:40:47,175 INFO [RS:0;d7d59561a64e:33463 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33463 2024-11-20T20:40:47,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d7d59561a64e,33463,1732135245666 2024-11-20T20:40:47,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T20:40:47,185 INFO [RS:0;d7d59561a64e:33463 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:40:47,196 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d7d59561a64e,33463,1732135245666] 2024-11-20T20:40:47,206 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d7d59561a64e,33463,1732135245666 already deleted, retry=false 2024-11-20T20:40:47,206 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d7d59561a64e,33463,1732135245666 expired; onlineServers=0 2024-11-20T20:40:47,206 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd7d59561a64e,44819,1732135245493' ***** 2024-11-20T20:40:47,206 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T20:40:47,206 INFO [M:0;d7d59561a64e:44819 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T20:40:47,206 INFO [M:0;d7d59561a64e:44819 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T20:40:47,207 DEBUG [M:0;d7d59561a64e:44819 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T20:40:47,207 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T20:40:47,207 DEBUG [M:0;d7d59561a64e:44819 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T20:40:47,207 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135245993 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.large.0-1732135245993,5,FailOnTimeoutGroup] 2024-11-20T20:40:47,207 DEBUG [master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135245993 {}] cleaner.HFileCleaner(306): Exit Thread[master/d7d59561a64e:0:becomeActiveMaster-HFileCleaner.small.0-1732135245993,5,FailOnTimeoutGroup] 2024-11-20T20:40:47,207 INFO [M:0;d7d59561a64e:44819 {}] hbase.ChoreService(370): Chore service for: master/d7d59561a64e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T20:40:47,207 INFO [M:0;d7d59561a64e:44819 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T20:40:47,207 DEBUG [M:0;d7d59561a64e:44819 {}] master.HMaster(1795): Stopping service threads 2024-11-20T20:40:47,207 INFO [M:0;d7d59561a64e:44819 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T20:40:47,207 INFO [M:0;d7d59561a64e:44819 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T20:40:47,207 INFO [M:0;d7d59561a64e:44819 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T20:40:47,207 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T20:40:47,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T20:40:47,217 DEBUG [M:0;d7d59561a64e:44819 {}] zookeeper.ZKUtil(347): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T20:40:47,217 WARN [M:0;d7d59561a64e:44819 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T20:40:47,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T20:40:47,218 INFO [M:0;d7d59561a64e:44819 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/.lastflushedseqids 2024-11-20T20:40:47,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741838_1014 (size=99) 2024-11-20T20:40:47,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741838_1014 (size=99) 2024-11-20T20:40:47,226 INFO [M:0;d7d59561a64e:44819 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T20:40:47,226 INFO [M:0;d7d59561a64e:44819 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T20:40:47,227 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T20:40:47,227 INFO [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:47,227 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:47,227 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T20:40:47,227 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T20:40:47,227 INFO [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-20T20:40:47,246 DEBUG [M:0;d7d59561a64e:44819 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/236bcda244cc4ee2b71a32a9c6449a6d is 82, key is hbase:meta,,1/info:regioninfo/1732135246722/Put/seqid=0 2024-11-20T20:40:47,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741839_1015 (size=5672) 2024-11-20T20:40:47,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741839_1015 (size=5672) 2024-11-20T20:40:47,250 INFO [M:0;d7d59561a64e:44819 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/236bcda244cc4ee2b71a32a9c6449a6d 2024-11-20T20:40:47,269 DEBUG [M:0;d7d59561a64e:44819 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/232570c7aacf44209c92b0c1aee4b629 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732135246783/Put/seqid=0 2024-11-20T20:40:47,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741840_1016 (size=5275) 2024-11-20T20:40:47,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741840_1016 (size=5275) 2024-11-20T20:40:47,273 INFO [M:0;d7d59561a64e:44819 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/232570c7aacf44209c92b0c1aee4b629 2024-11-20T20:40:47,290 DEBUG [M:0;d7d59561a64e:44819 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd6c1503d7b84ed98d678b4a7763b106 is 69, key is d7d59561a64e,33463,1732135245666/rs:state/1732135246133/Put/seqid=0 2024-11-20T20:40:47,294 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741841_1017 (size=5156) 2024-11-20T20:40:47,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741841_1017 (size=5156) 2024-11-20T20:40:47,295 INFO [M:0;d7d59561a64e:44819 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd6c1503d7b84ed98d678b4a7763b106 2024-11-20T20:40:47,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:40:47,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33463-0x10159fa524d0001, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:40:47,296 INFO [RS:0;d7d59561a64e:33463 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:40:47,296 INFO [RS:0;d7d59561a64e:33463 {}] regionserver.HRegionServer(1031): Exiting; stopping=d7d59561a64e,33463,1732135245666; zookeeper connection closed. 2024-11-20T20:40:47,297 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@ef93578 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@ef93578 2024-11-20T20:40:47,297 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T20:40:47,311 DEBUG [M:0;d7d59561a64e:44819 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e818def4c0b4c02b6e0a6e65a96846f is 52, key is load_balancer_on/state:d/1732135246904/Put/seqid=0 2024-11-20T20:40:47,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741842_1018 (size=5056) 2024-11-20T20:40:47,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741842_1018 (size=5056) 2024-11-20T20:40:47,316 INFO [M:0;d7d59561a64e:44819 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e818def4c0b4c02b6e0a6e65a96846f 2024-11-20T20:40:47,320 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/236bcda244cc4ee2b71a32a9c6449a6d as hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/236bcda244cc4ee2b71a32a9c6449a6d 2024-11-20T20:40:47,324 INFO [M:0;d7d59561a64e:44819 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/236bcda244cc4ee2b71a32a9c6449a6d, entries=8, sequenceid=29, filesize=5.5 K 2024-11-20T20:40:47,325 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/232570c7aacf44209c92b0c1aee4b629 as hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/232570c7aacf44209c92b0c1aee4b629 2024-11-20T20:40:47,329 INFO [M:0;d7d59561a64e:44819 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/232570c7aacf44209c92b0c1aee4b629, entries=3, sequenceid=29, filesize=5.2 K 2024-11-20T20:40:47,330 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd6c1503d7b84ed98d678b4a7763b106 as hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd6c1503d7b84ed98d678b4a7763b106 2024-11-20T20:40:47,335 INFO [M:0;d7d59561a64e:44819 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd6c1503d7b84ed98d678b4a7763b106, entries=1, sequenceid=29, filesize=5.0 K 2024-11-20T20:40:47,336 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e818def4c0b4c02b6e0a6e65a96846f as hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7e818def4c0b4c02b6e0a6e65a96846f 2024-11-20T20:40:47,340 INFO [M:0;d7d59561a64e:44819 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40503/user/jenkins/test-data/4e63e37c-9227-dac8-4728-b17406441c86/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7e818def4c0b4c02b6e0a6e65a96846f, entries=1, sequenceid=29, filesize=4.9 K 2024-11-20T20:40:47,341 INFO [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false 2024-11-20T20:40:47,342 INFO [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T20:40:47,342 DEBUG [M:0;d7d59561a64e:44819 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732135247227Disabling compacts and flushes for region at 1732135247227Disabling writes for close at 1732135247227Obtaining lock to block concurrent updates at 1732135247227Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732135247227Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732135247227Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732135247228 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732135247228Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732135247246 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732135247246Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732135247254 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732135247268 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732135247268Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732135247278 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732135247290 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732135247290Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732135247299 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732135247311 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732135247311Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4041d1: reopening flushed file at 1732135247319 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bfc1fb1: reopening flushed file at 1732135247324 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@463dc146: reopening flushed file at 1732135247329 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cd6ed40: reopening flushed file at 1732135247335 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false at 1732135247341 (+6 ms)Writing region close event to WAL at 1732135247342 (+1 ms)Closed at 1732135247342 2024-11-20T20:40:47,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,343 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,343 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,343 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,343 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T20:40:47,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42573 is added to blk_1073741830_1006 (size=10311) 2024-11-20T20:40:47,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42929 is added to blk_1073741830_1006 (size=10311) 2024-11-20T20:40:47,345 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
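The close journal above records the standard memstore-to-HFile flush path, one store at a time: snapshot the memstore, write the snapshot to a .tmp HFile ("appending metadata", "closing flushed file"), then commit it into the store directory ("Committing ... .tmp/... as ...", "Added ..., entries=..., sequenceid=..., filesize=..."). The same flush can be forced from a client; a hedged sketch follows, where the table name is hypothetical.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: trigger the memstore -> .tmp HFile -> commit sequence
// seen above by asking for an explicit flush of a table.
public class FlushExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
           ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("example_table")); // hypothetical table
    }
  }
}
```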
2024-11-20T20:40:47,345 INFO [M:0;d7d59561a64e:44819 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T20:40:47,345 INFO [M:0;d7d59561a64e:44819 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44819 2024-11-20T20:40:47,346 INFO [M:0;d7d59561a64e:44819 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T20:40:47,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44749/user/jenkins/test-data/ce6c8851-c77b-354b-416c-9a8ecff2b2e5/WALs/d7d59561a64e,39199,1732135042427/d7d59561a64e%2C39199%2C1732135042427.1732135043207 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T20:40:47,454 INFO [M:0;d7d59561a64e:44819 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T20:40:47,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:40:47,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44819-0x10159fa524d0000, quorum=127.0.0.1:61719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T20:40:47,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f1bdece{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T20:40:47,460 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d05b3cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T20:40:47,460 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T20:40:47,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ea3f07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T20:40:47,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e345901{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/hadoop.log.dir/,STOPPED} 2024-11-20T20:40:47,463 WARN [BP-1886452336-172.17.0.2-1732135242935 heartbeating to localhost/127.0.0.1:40503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T20:40:47,463 WARN [BP-1886452336-172.17.0.2-1732135242935 heartbeating to localhost/127.0.0.1:40503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1886452336-172.17.0.2-1732135242935 (Datanode Uuid 01d31dbc-f5b6-43ce-8e41-527302f4c493) service to localhost/127.0.0.1:40503 2024-11-20T20:40:47,463 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T20:40:47,463 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:40:47,464 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/data/data3/current/BP-1886452336-172.17.0.2-1732135242935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:40:47,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/data/data4/current/BP-1886452336-172.17.0.2-1732135242935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:40:47,465 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:40:47,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18421c56{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T20:40:47,467 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6adc48be{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:40:47,467 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:40:47,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27b8ac4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:40:47,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18d79289{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/hadoop.log.dir/,STOPPED}
2024-11-20T20:40:47,468 WARN [BP-1886452336-172.17.0.2-1732135242935 heartbeating to localhost/127.0.0.1:40503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T20:40:47,468 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T20:40:47,468 WARN [BP-1886452336-172.17.0.2-1732135242935 heartbeating to localhost/127.0.0.1:40503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1886452336-172.17.0.2-1732135242935 (Datanode Uuid 79a3e18a-f1ff-44b3-9c2b-c6ef1d9ba857) service to localhost/127.0.0.1:40503
2024-11-20T20:40:47,468 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T20:40:47,468 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/data/data1/current/BP-1886452336-172.17.0.2-1732135242935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:40:47,469 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/cluster_95bb9e43-eba2-a556-2744-27e26f2f5f10/data/data2/current/BP-1886452336-172.17.0.2-1732135242935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T20:40:47,469 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T20:40:47,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4be2fc80{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T20:40:47,473 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6833f9d6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T20:40:47,473 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T20:40:47,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72a3013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T20:40:47,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64047f5d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b50ea683-8577-e769-f7cf-48810e7c6ed6/hadoop.log.dir/,STOPPED}
2024-11-20T20:40:47,479 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-20T20:40:47,493 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-20T20:40:47,500 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=274 (was 234)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40503
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:40503
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:40503 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:40503
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:40503 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:40503 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40503
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40503
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=532 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=212 (was 214), ProcessCount=11 (was 11), AvailableMemoryMB=1422 (was 1444)